| code (string) | code_codestyle (int64) | style_context (string) | style_context_codestyle (int64) | label (int64) |
|---|---|---|---|---|
def is_palindrome(num):
    # A palindrome reads the same forwards and backwards.
    return str(num) == str(num)[::-1]


def sum_reverse(num):
    # Add a number to its digit-reversed counterpart.
    return int(num) + int(str(num)[::-1])


def solution(limit=10000):
    # Count Lychrel candidates below `limit`: numbers that do not reach a
    # palindrome within 50 reverse-and-add iterations (Project Euler 55).
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f'{solution() = }')
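A quick sanity check for the solver above. The expected count of 249 below 10,000 is the published Project Euler 55 answer, stated here as an external fact rather than derived from this file:

```python
# Known result for Project Euler 55 (treat as an external fact, not derived here).
assert solution(10000) == 249
```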
| 49 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ViT MSN model."""

    model_type = 'vit_msn'

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 145 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_opt'] = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_opt'] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_opt'] = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 355 |
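The `__init__.py` rows in this dump all follow the same lazy-import idiom: declare `_import_structure` mapping submodules to public names, import everything eagerly only under `TYPE_CHECKING`, and otherwise replace the module in `sys.modules` with a `_LazyModule`. Below is a minimal sketch of the idea; `LazyModule` here is illustrative and much simpler than the real `transformers._LazyModule`:

```python
# Minimal sketch of the lazy-import pattern (assumption: a simplified stand-in,
# not the actual transformers._LazyModule implementation).
import importlib
import types


class LazyModule(types.ModuleType):
    """Defers submodule imports until an attribute is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each public attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # Import the heavy submodule only now, on first use.
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache for subsequent lookups
        return value
```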
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt `text` into (cipher, key) using a pseudo-random key per character."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Recover the plaintext from (cipher, key)."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt('Hello')
    print(c, k)
    print(Onepad().decrypt(c, k))
| 287 | 0 |
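Why decryption works: encryption computes c = (p + k)·k = p·k + k², so p = (c − k²)/k recovers the plaintext byte exactly. A small worked check (values chosen arbitrarily for illustration):

```python
# Worked example of the inverse relationship.
p, k = ord("H"), 42          # plaintext byte and a sample random key
c = (p + k) * k              # encrypt: c = p*k + k**2
assert (c - k**2) // k == p  # decrypt recovers the original byte
```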
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    mam_aaa = torch.load(checkpoint_path, map_location='cpu')
    args = mam_aaa['args'] or mam_aaa['cfg']['model']
    state_dict = mam_aaa['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', )
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 119 |
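A hedged usage sketch of the converter above; the checkpoint and output paths are placeholders, not paths from the source:

```python
# Hypothetical paths, for illustration only.
model = convert_fairseq_mamaaa_checkpoint_from_disk("m2m_100/checkpoint.pt")
model.save_pretrained("m2m_100_converted")
```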
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" ELECTRA tokenizer (backed by HuggingFace's tokenizers library), based on WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 312 | 0 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step function."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """The variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 2000, snr: float = 0.15, sigma_min: float = 0.01, sigma_max: float = 1348.0, sampling_eps: float = 1e-5, correct_steps: int = 1, ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )

    def step_pred(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator=None, return_dict: bool = True, ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(self, model_output: torch.FloatTensor, sample: torch.FloatTensor, generator=None, return_dict: bool = True, ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor, ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 365 |
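A hedged predictor-corrector sampling sketch using the scheduler defined above. `score_model` is a zero-output stand-in for a trained score network (an assumption for illustration, not part of the source):

```python
# Stand-in for a trained network returning grad_x log p_t(x) (assumption).
def score_model(x, t):
    return torch.zeros_like(x)

scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=100)
scheduler.set_sigmas(num_inference_steps=100)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    # corrector: annealed Langevin dynamics at the current noise level
    for _ in range(scheduler.config.correct_steps):
        sample = scheduler.step_correct(score_model(sample, t), sample).prev_sample
    # predictor: one discretized reverse-SDE step
    sample = scheduler.step_pred(score_model(sample, t), t, sample).prev_sample
```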
'''simple docstring'''
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}

ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator so both params appear in the sub-test name
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    # NOTE: the test method names, their distributed/fp16 combinations, and the
    # `after=False` argument below are reconstructed from the standard pattern
    # for this test file; treat them as assumptions.
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=False, )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=False, )

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=True, )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=True, )

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16, )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir('./xxx', after=False)
        args = f"\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(num_train_epochs)}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n ".split()
        if fp16:
            args.extend(["--fp16"])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
        return output_dir

    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 164 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_van'] = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 167 |
"""simple docstring"""
from manim import *
class lowercase ( __UpperCAmelCase):
def a_ ( self : int ):
"""simple docstring"""
A_ : List[str] = Rectangle(height=0.5 , width=0.5 )
A_ : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
A_ : Tuple = [mem.copy() for i in range(6 )]
A_ : Optional[int] = [mem.copy() for i in range(6 )]
A_ : Optional[int] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Optional[int] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : List[str] = VGroup(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Dict = Text('''CPU''' , font_size=24 )
A_ : List[str] = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCamelCase )
A_ : Optional[int] = [mem.copy() for i in range(1 )]
A_ : int = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : List[str] = Text('''GPU''' , font_size=24 )
A_ : List[str] = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
gpu.align_to(_lowerCamelCase , _lowerCamelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowerCamelCase )
A_ : List[Any] = [mem.copy() for i in range(6 )]
A_ : List[str] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Any = Text('''Model''' , font_size=24 )
A_ : Optional[int] = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowerCamelCase , run_time=1 ) , Create(_lowerCamelCase , run_time=1 ) , Create(_lowerCamelCase , run_time=1 ) , )
A_ : List[str] = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
A_ : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A_ : Dict = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCamelCase , run_time=2.5 ) , Write(_lowerCamelCase ) , Write(_lowerCamelCase ) )
self.add(_lowerCamelCase )
A_ : str = []
A_ : Any = []
A_ : Tuple = []
for i, rect in enumerate(_lowerCamelCase ):
A_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCamelCase , opacity=0.7 )
cpu_target.move_to(_lowerCamelCase )
cpu_target.generate_target()
A_ : List[str] = 0.46 / 4
A_ : List[Any] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowerCamelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_lowerCamelCase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_lowerCamelCase , buff=0.0 )
cpu_targs.append(_lowerCamelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCamelCase ) )
second_animations.append(MoveToTarget(_lowerCamelCase , run_time=1.5 ) )
self.play(*_lowerCamelCase )
self.play(*_lowerCamelCase )
self.wait()
| 167 | 1 |
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + "\""):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + "\""):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + "\""):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo and raise an error if at least one does not define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Check all submodules are properly registered in the main init of Transformers."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 358 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 142 | 0 |
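A hedged usage sketch for the function above; the file names are placeholders, and any extra kwargs pass straight through to `calculate_rouge`:

```python
# Hypothetical file names; mirrors the fire.Fire entry point above.
metrics = calculate_rouge_path("preds.txt", "refs.txt", save_path="rouge.json")
print(metrics)
```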
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for ShapEImg2ImgPipeline; `images` holds the rendered views."""

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    """Pipeline for generating a latent representation of a 3D asset from an image and rendering it with NeRF."""
def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Optional[Any]:
super().__init__()
self.register_modules(
prior=__lowercase , image_encoder=__lowercase , image_processor=__lowercase , scheduler=__lowercase , renderer=__lowercase , )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
if latents is None:
__UpperCamelCase :Any = randn_tensor(__lowercase , generator=__lowercase , device=__lowercase , dtype=__lowercase)
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
__UpperCamelCase :List[Any] = latents.to(__lowercase)
__UpperCamelCase :int = latents * scheduler.init_noise_sigma
return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''')
__UpperCamelCase :Tuple = torch.device(f"""cuda:{gpu_id}""")
__UpperCamelCase :str = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowercase , __lowercase)
    @property
    def _execution_device(self):
if self.device != torch.device('''meta''') or not hasattr(self.image_encoder , '''_hf_hook'''):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__lowercase , '''_hf_hook''')
and hasattr(module._hf_hook , '''execution_device''')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
if isinstance(__lowercase , __lowercase) and isinstance(image[0] , torch.Tensor):
__UpperCamelCase :Union[str, Any] = torch.cat(__lowercase , axis=0) if image[0].ndim == 4 else torch.stack(__lowercase , axis=0)
if not isinstance(__lowercase , torch.Tensor):
__UpperCamelCase :Any = self.image_processor(__lowercase , return_tensors='''pt''').pixel_values[0].unsqueeze(0)
__UpperCamelCase :Optional[Any] = image.to(dtype=self.image_encoder.dtype , device=__lowercase)
__UpperCamelCase :Union[str, Any] = self.image_encoder(__lowercase)['''last_hidden_state''']
__UpperCamelCase :Any = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
__UpperCamelCase :Any = image_embeds.repeat_interleave(__lowercase , dim=0)
if do_classifier_free_guidance:
__UpperCamelCase :Dict = torch.zeros_like(__lowercase)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__UpperCamelCase :Any = torch.cat([negative_image_embeds, image_embeds])
return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self , __lowercase , __lowercase = 1 , __lowercase = 25 , __lowercase = None , __lowercase = None , __lowercase = 4.0 , __lowercase = 64 , __lowercase = "pil" , __lowercase = True , ) -> List[Any]:
if isinstance(__lowercase , PIL.Image.Image):
__UpperCamelCase :List[Any] = 1
elif isinstance(__lowercase , torch.Tensor):
__UpperCamelCase :str = image.shape[0]
elif isinstance(__lowercase , __lowercase) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image)):
__UpperCamelCase :Dict = len(__lowercase)
else:
raise ValueError(
f"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__lowercase)}""")
__UpperCamelCase :Tuple = self._execution_device
__UpperCamelCase :List[Any] = batch_size * num_images_per_prompt
__UpperCamelCase :List[Any] = guidance_scale > 1.0
__UpperCamelCase :str = self._encode_image(__lowercase , __lowercase , __lowercase , __lowercase)
# prior
self.scheduler.set_timesteps(__lowercase , device=__lowercase)
__UpperCamelCase :str = self.scheduler.timesteps
__UpperCamelCase :str = self.prior.config.num_embeddings
__UpperCamelCase :Optional[Any] = self.prior.config.embedding_dim
__UpperCamelCase :List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , __lowercase , __lowercase , __lowercase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
__UpperCamelCase :List[Any] = latents.reshape(latents.shape[0] , __lowercase , __lowercase)
for i, t in enumerate(self.progress_bar(__lowercase)):
# expand the latents if we are doing classifier free guidance
__UpperCamelCase :int = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__UpperCamelCase :Dict = self.scheduler.scale_model_input(__lowercase , __lowercase)
__UpperCamelCase :List[Any] = self.prior(
__lowercase , timestep=__lowercase , proj_embedding=__lowercase , ).predicted_image_embedding
# remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
__UpperCamelCase :Optional[int] = self.scheduler.step(
__lowercase , timestep=__lowercase , sample=__lowercase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__lowercase)
__UpperCamelCase :List[Any] = []
for i, latent in enumerate(__lowercase):
__UpperCamelCase :Any = self.renderer.decode(
latent[None, :] , __lowercase , size=__lowercase , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(__lowercase)
__UpperCamelCase :List[str] = torch.stack(__lowercase)
if output_type not in ["np", "pil"]:
raise ValueError(f"""Only the output types `pil` and `np` are supported not output_type={output_type}""")
__UpperCamelCase :Optional[int] = images.cpu().numpy()
if output_type == "pil":
__UpperCamelCase :Optional[Any] = [self.numpy_to_pil(__lowercase) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''') and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__lowercase)
| 43 |
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ''
IMG_DIR = ''
OUTPUT_DIR = ''
NUMBER_IMAGES = 250


def main():
    '''
    Get images and annotations from the input dirs, build mosaic augmentations,
    and save the new images and YOLO-format annotations to the output dir.
    '''
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE, )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))


def get_dataset(label_dir, img_dir):
    '''Read YOLO-format label files and convert each box to corner coordinates.'''
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0, ):
    '''Paste four images into one mosaic and rescale their annotations accordingly.'''
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char):
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print('DONE ✅')
| 43 | 1 |
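`get_dataset` converts YOLO labels (class, x_center, y_center, width, height, all normalized) to corner coordinates, and `main` converts them back after the mosaic. A small worked check of that round trip (values invented for illustration and chosen to be exact in binary floating point):

```python
# YOLO label -> corners -> YOLO label round trip.
x_center, y_center, width, height = 0.5, 0.5, 0.25, 0.5
xmin, ymin = x_center - width / 2, y_center - height / 2   # (0.375, 0.25)
xmax, ymax = x_center + width / 2, y_center + height / 2   # (0.625, 0.75)
assert (xmax - xmin, ymax - ymin) == (width, height)
assert (xmin + (xmax - xmin) / 2, ymin + (ymax - ymin) / 2) == (x_center, y_center)
```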
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_deit"""] = ["""DeiTFeatureExtractor"""]
    _import_structure["""image_processing_deit"""] = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_deit"""] = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_deit"""] = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 122 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step/max_step) after which the complex number
    constituted by this x-y-pair diverges. Members of the Mandelbrot set do not
    diverge, so their distance is 1.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black-and-white coloring: the Mandelbrot set is black, everything else is white."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Color-coding that takes the relative distance into account; the Mandelbrot set is black."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True, ) -> Image.Image:
    """Create an image of the Mandelbrot set."""
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
__snake_case : str = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
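
# Worked example of the escape-time iteration in get_distance for c = 1 + 0j:
# starting from z = c, z -> z**2 + c gives 1 -> 2 -> 5, and |z|**2 = 25 > 4 at the
# second update, so the point escapes almost immediately. A point inside the set,
# such as c = 0, never escapes and get_distance returns 1.0:
#
#     assert get_distance(0, 0, 50) == 1.0  # c = 0 belongs to the Mandelbrot set
#     assert get_distance(1, 0, 50) < 0.1   # c = 1 diverges after two steps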
| 122 | 1 |
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTex.\n    references: list of reference for each prediction. Each\n        reference is a string that contains natural language\n        and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
    def _compute(self, predictions, references):
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
return {
"accuracy": accuracy,
        }
 | 135 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
"""simple docstring"""
snake_case__ = (PNDMScheduler,)
snake_case__ = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Optional[Any]=0 ,**lowerCamelCase__ : List[str] ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : Tuple ):
pass
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str]=0 ,**lowerCamelCase__ : Tuple ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : List[Any] ,**lowerCamelCase__ : int ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = 10
UpperCAmelCase__ = self.dummy_model()
UpperCAmelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
return sample
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase__ ,'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase__ ,'set_timesteps' ):
UpperCAmelCase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCAmelCase ( self : List[Any] ):
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[int] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCamelCase__ )
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps ,torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,)
def __lowerCAmelCase ( self : Dict ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowerCamelCase__ ,beta_end=lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
        # an earlier version of set_timesteps() raised an error when indexing alphas with inference steps equal to a power of 3
UpperCAmelCase__ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
def __lowerCAmelCase ( self : int ):
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.full_loop()
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.full_loop(prediction_type='v_prediction' )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def __lowerCAmelCase ( self : Union[str, Any] ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
| 98 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000,
        )
        return CLIPTextModelWithProjection(config)
@property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)
@property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model
@property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_super_res_kwargs(self):
return {
"sample_size": 6_4,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
@property
    def dummy_super_res_last(self):
        # seeded differently to get a different unet than `dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range", prediction_type="epsilon", num_train_timesteps=1_000,
        )
        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="epsilon", num_train_timesteps=1_000,
        )
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
def lowerCAmelCase ( self : Any):
__lowerCamelCase : str = 'cpu'
__lowerCamelCase : Union[str, Any] = self.get_dummy_components()
__lowerCamelCase : Optional[Any] = self.pipeline_class(**lowercase_)
__lowerCamelCase : str = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
__lowerCamelCase : int = self.get_dummy_inputs(lowercase_ ,pil_image=lowercase_)
__lowerCamelCase : List[str] = pipe(**lowercase_)
__lowerCamelCase : str = output.images
__lowerCamelCase : List[Any] = self.get_dummy_inputs(lowercase_ ,pil_image=lowercase_)
__lowerCamelCase : Optional[int] = pipe(
**lowercase_ ,return_dict=lowercase_ ,)[0]
__lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
__lowerCamelCase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCamelCase : Optional[Any] = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Optional[int] = 'cpu'
__lowerCamelCase : str = self.get_dummy_components()
__lowerCamelCase : Optional[int] = self.pipeline_class(**lowercase_)
__lowerCamelCase : Tuple = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
__lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(lowercase_ ,pil_image=lowercase_)
__lowerCamelCase : List[str] = pipe(**lowercase_)
__lowerCamelCase : List[str] = output.images
__lowerCamelCase : str = self.get_dummy_inputs(lowercase_ ,pil_image=lowercase_)
__lowerCamelCase : Optional[int] = pipe(
**lowercase_ ,return_dict=lowercase_ ,)[0]
__lowerCamelCase : str = image[0, -3:, -3:, -1]
__lowerCamelCase : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCamelCase : str = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : Any = 'cpu'
__lowerCamelCase : Any = self.get_dummy_components()
__lowerCamelCase : Union[str, Any] = self.pipeline_class(**lowercase_)
__lowerCamelCase : Union[str, Any] = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
__lowerCamelCase : str = self.get_dummy_inputs(lowercase_ ,pil_image=lowercase_)
__lowerCamelCase : Optional[Any] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
__lowerCamelCase : List[Any] = pipe(**lowercase_)
__lowerCamelCase : List[Any] = output.images
__lowerCamelCase : int = self.get_dummy_inputs(lowercase_ ,pil_image=lowercase_)
__lowerCamelCase : List[str] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
__lowerCamelCase : List[Any] = pipe(
**lowercase_ ,return_dict=lowercase_ ,)[0]
__lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
__lowerCamelCase : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 6_4, 6_4, 3)
__lowerCamelCase : Union[str, Any] = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : Any = torch.device('cpu')
        class DummyScheduler:
            init_noise_sigma = 1
__lowerCamelCase : str = self.get_dummy_components()
__lowerCamelCase : Union[str, Any] = self.pipeline_class(**lowercase_)
__lowerCamelCase : Any = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
__lowerCamelCase : Union[str, Any] = torch.Generator(device=lowercase_).manual_seed(0)
__lowerCamelCase : Optional[int] = pipe.decoder.dtype
__lowerCamelCase : Tuple = 1
__lowerCamelCase : Tuple = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
__lowerCamelCase : List[str] = pipe.prepare_latents(
lowercase_ ,dtype=lowercase_ ,device=lowercase_ ,generator=lowercase_ ,latents=lowercase_ ,scheduler=DummyScheduler())
__lowerCamelCase : Dict = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
__lowerCamelCase : List[str] = pipe.prepare_latents(
lowercase_ ,dtype=lowercase_ ,device=lowercase_ ,generator=lowercase_ ,latents=lowercase_ ,scheduler=DummyScheduler())
__lowerCamelCase : str = self.get_dummy_inputs(lowercase_ ,pil_image=lowercase_)
__lowerCamelCase : Optional[int] = pipe(
**lowercase_ ,decoder_latents=lowercase_ ,super_res_latents=lowercase_).images
__lowerCamelCase : List[Any] = self.get_dummy_inputs(lowercase_ ,pil_image=lowercase_)
# Don't pass image, instead pass embedding
__lowerCamelCase : Any = pipeline_inputs.pop('image')
__lowerCamelCase : Tuple = pipe.image_encoder(lowercase_).image_embeds
__lowerCamelCase : Optional[Any] = pipe(
**lowercase_ ,decoder_latents=lowercase_ ,super_res_latents=lowercase_ ,image_embeddings=lowercase_ ,).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a).max() < 1E-4
@skip_mps
def lowerCAmelCase ( self : str):
__lowerCamelCase : Optional[Any] = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
__lowerCamelCase : Tuple = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase_ ,expected_max_diff=lowercase_)
@skip_mps
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Tuple = torch_device == 'cpu'
__lowerCamelCase : Any = True
__lowerCamelCase : List[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ ,relax_max_difference=lowercase_ ,additional_params_copy_to_batched_inputs=lowercase_ ,)
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Union[str, Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
__lowerCamelCase : Dict = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=lowercase_ ,additional_params_copy_to_batched_inputs=lowercase_ ,)
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=lowercase_)
@skip_mps
def lowerCAmelCase ( self : str):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCAmelCase ( self : Any):
return super().test_save_load_local()
@skip_mps
def lowerCAmelCase ( self : Any):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : Optional[Any]):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : str):
__lowerCamelCase : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png')
__lowerCamelCase : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy')
__lowerCamelCase : Optional[int] = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16)
__lowerCamelCase : str = pipeline.to(lowercase_)
pipeline.set_progress_bar_config(disable=lowercase_)
__lowerCamelCase : Any = torch.Generator(device='cpu').manual_seed(0)
__lowerCamelCase : Tuple = pipeline(
lowercase_ ,generator=lowercase_ ,output_type='np' ,)
__lowerCamelCase : Optional[int] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert_mean_pixel_difference(lowercase_ ,lowercase_ ,1_5)
| 369 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
a ="""true"""
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    _, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 113 | 0 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid linear unit (SiLU / swish): x * sigmoid(x)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
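
# Quick numerical sanity check of the two activations above:
#
#     >>> sigmoid(np.array([0.0]))
#     array([0.5])
#     >>> sigmoid_linear_unit(np.array([0.0, 1.0]))
#     array([0.        , 0.73105858])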
| 93 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_checking(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
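
# Sketch of how this mixin is meant to be used (the tool class below is
# hypothetical, not part of this module):
#
#     class MyToolTester(ToolTesterMixin, unittest.TestCase):
#         def setUp(self):
#             self.tool = MyTool()
#             self.tool.setup()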
| 93 | 1 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
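
# A minimal usage sketch (the file path is illustrative, not from this repo):
# reading through GzipFileSystem exposes the archive as a one-file filesystem
# and decompresses transparently.
#
#     fs = GzipFileSystem(fo="path/to/data.txt.gz")
#     with fs.open(fs.uncompressed_name) as f:
#         raw_bytes = f.read()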
| 22 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
lowerCamelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
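
# Example invocation (the script name and paths are illustrative):
#
#     python convert_vae_pt_to_diffusers.py \
#         --vae_pt_path ./vae.pt \
#         --dump_path ./converted_vae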
| 22 | 1 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
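
# How the local binary pattern above works, informally: each pixel is compared
# against its 8 neighbours, a neighbour at least as bright as the centre
# contributes a 1-bit, and the 8 bits form a value in 0..255. Assuming that
# >=-comparison convention, a constant patch maps to the all-ones pattern:
#
#     patch = np.full((3, 3), 7, dtype=np.uint8)
#     assert lbp.local_binary_value(patch, 1, 1) == 0b11111111  # == 255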
| 137 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            f'{test_file} instead.')
    test_fn = components[-1]
    if not test_fn.endswith('py'):
        raise ValueError(f'`test_file` should be a python file. Got {test_fn} instead.')
    if not test_fn.startswith('test_modeling_'):
        raise ValueError(
            f'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.')
    components = components[:-1] + [test_fn.replace('.py', '')]
    test_module_path = '.'.join(components)
    return test_module_path
def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith('ModelTester'):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, 'all_model_classes', [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, 'setUp'):
        test.setUp()
    model_tester = None
    if hasattr(test, 'model_tester'):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    """Make the collected information succinct and JSON-serializable."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
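# Usage sketch for the helpers above. The test file path is hypothetical, and
# importing the resolved module additionally requires a transformers checkout
# on sys.path (hence the sys.path.append('.') at the top of this file).
if __name__ == "__main__":
    example_test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
    print(get_module_path(example_test_file))  # -> tests.models.bert.test_modeling_bert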
| 137 | 1 |
def solution() -> str:
    '''Project Euler 48: return the last ten digits of 1**1 + 2**2 + ... + 1000**1000.'''
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]
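# Sanity check on a tiny prefix of the same series: 1**1 + 2**2 + 3**3 = 32.
assert sum(i**i for i in range(1, 4)) == 32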
if __name__ == "__main__":
print(solution()) | 284 | from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        'no_inference',
        'no_cuda',
        'no_tpu',
        'no_speed',
        'no_memory',
        'no_env_print',
        'no_multi_process',
    ]
    def __init__(self, **kwargs):
        """Rename deprecated `no_*` arguments to their positive counterparts before delegating to the parent."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"""{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""" )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None, metadata={'help': 'Name of TPU'}, )
    device_idx: int = field(
        default=0, metadata={'help': 'CPU / GPU device index. Defaults to 0.'}, )
    eager_mode: bool = field(default=False, metadata={'help': 'Benchmark models in eager mode.'} )
    use_xla: bool = field(
        default=False, metadata={
            'help': 'Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`.'
        } , )
    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""")
        return strategy
    @property
    def is_tpu(self):
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None
    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy
    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")
    @property
    def n_gpu(self):
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0
    @property
    def is_gpu(self):
        return self.n_gpu > 0 | 284 | 1 |
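# Standalone sketch of the "no_*" flag renaming performed in __init__ above: a
# negated legacy flag is flipped into its positive counterpart (helper name is ours).
def _rename_deprecated_flags(kwargs, deprecated_args):
    for deprecated_arg in list(kwargs):
        if deprecated_arg in deprecated_args:
            positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
            kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
    return kwargs

assert _rename_deprecated_flags({"no_cuda": True}, ["no_cuda"]) == {"cuda": False}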
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""huggingface/time-series-transformer-tourism-monthly""": (
"""https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"""
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = '''time_series_transformer'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }
    def __init__( self, prediction_length = None, context_length = None, distribution_output = "student_t", loss = "nll", input_size = 1, lags_sequence = [1, 2, 3, 4, 5, 6, 7], scaling = "mean", num_dynamic_real_features = 0, num_static_categorical_features = 0, num_static_real_features = 0, num_time_features = 0, cardinality = None, embedding_dimension = None, encoder_ffn_dim = 3_2, decoder_ffn_dim = 3_2, encoder_layers = 2, decoder_layers = 2, encoder_attention_heads = 2, decoder_attention_heads = 2, is_encoder_decoder = True, activation_function = "gelu", d_model = 6_4, dropout = 0.1, encoder_layerdrop = 0.1, decoder_layerdrop = 0.1, attention_dropout = 0.1, activation_dropout = 0.1, num_parallel_samples = 1_0_0, init_std = 0.02, use_cache=True, **kwargs, ):
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(5_0, (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )
    @property
    def _number_of_features(self):
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
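# Quick check of the default embedding-dimension rule used above: each categorical
# feature of cardinality `cat` gets min(50, (cat + 1) // 2) embedding dimensions.
assert [min(50, (cat + 1) // 2) for cat in [3, 100, 2000]] == [2, 50, 50]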
| 140 | import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
_UpperCAmelCase = """scheduler_config.json"""
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = 1
lowerCamelCase_ = 2
lowerCamelCase_ = 3
lowerCamelCase_ = 4
lowerCamelCase_ = 5
lowerCamelCase_ = 6
lowerCamelCase_ = 7
lowerCamelCase_ = 8
lowerCamelCase_ = 9
lowerCamelCase_ = 1_0
lowerCamelCase_ = 1_1
lowerCamelCase_ = 1_2
lowerCamelCase_ = 1_3
lowerCamelCase_ = 1_4
@dataclass
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = 42
class UpperCAmelCase :
'''simple docstring'''
lowerCamelCase_ = SCHEDULER_CONFIG_NAME
lowerCamelCase_ = []
lowerCamelCase_ = True
@classmethod
def lowerCAmelCase_ ( cls , lowercase = None , lowercase = None , lowercase=False , **lowercase , ):
"""simple docstring"""
A_ , A_ , A_ : int = cls.load_config(
pretrained_model_name_or_path=lowercase , subfolder=lowercase , return_unused_kwargs=lowercase , return_commit_hash=lowercase , **lowercase , )
return cls.from_config(lowercase , return_unused_kwargs=lowercase , **lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase = False , **lowercase ):
"""simple docstring"""
self.save_config(save_directory=lowercase , push_to_hub=lowercase , **lowercase )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return self._get_compatibles()
@classmethod
def lowerCAmelCase_ ( cls ):
"""simple docstring"""
A_ : Optional[Any] = list(set([cls.__name__] + cls._compatibles ) )
A_ : Any = importlib.import_module(__name__.split('.' )[0] )
A_ : Tuple = [
getattr(lowercase , lowercase ) for c in compatible_classes_str if hasattr(lowercase , lowercase )
]
return compatible_classes
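# Conceptual sketch of _get_compatibles above (helper name and default package are
# ours): class-name strings are resolved against a top-level package's exports.
def _resolve_compatibles(names, package="diffusers"):
    module = importlib.import_module(package)
    return [getattr(module, name) for name in names if hasattr(module, name)]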
| 140 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
a__: Optional[Any] = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}" )
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']
    def __init__(self, do_resize = True, size = None, resample = PILImageResampling.BILINEAR, do_center_crop = True, crop_size = None, do_rescale = True, rescale_factor = 1 / 255, offset = True, do_normalize = True, image_mean = None, image_std = None, **kwargs, ):
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 256}
        size = get_size_dict(size, default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size, param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image, size, resample = PILImageResampling.BILINEAR, data_format = None, **kwargs, ):
        size = get_size_dict(size, default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size['''shortest_edge'''], default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size["""height"""], size["""width"""])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs )
    def center_crop(self, image, size, data_format = None, **kwargs, ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
        return center_crop(image, size=(size['''height'''], size['''width''']), data_format=data_format, **kwargs )
    def rescale(self, image, scale, offset = True, data_format = None, **kwargs, ):
        image = image.astype(np.float32 )
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs )
    def normalize(self, image, mean, std, data_format = None, **kwargs, ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs )
    def _preprocess_image(self, image, do_resize = None, size = None, resample = None, do_center_crop = None, crop_size = None, do_rescale = None, rescale_factor = None, offset = None, do_normalize = None, image_mean = None, image_std = None, data_format = ChannelDimension.FIRST, ):
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        if offset and not do_rescale:
            raise ValueError('''For offset, do_rescale must also be set to True.''' )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample )
        if do_center_crop:
            image = self.center_crop(image, size=crop_size )
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset )
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std )
        image = to_channel_dimension_format(image, data_format )
        return image
    def preprocess(self, videos, do_resize = None, size = None, resample = None, do_center_crop = None, crop_size = None, do_rescale = None, rescale_factor = None, offset = None, do_normalize = None, image_mean = None, image_std = None, return_tensors = None, data_format = ChannelDimension.FIRST, **kwargs, ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='''crop_size''' )
        if not valid_images(videos ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]
        data = {"""pixel_values""": videos}
        return BatchFeature(data=data, tensor_type=return_tensors )
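# Behavior check for make_batched above: a single frame becomes [[frame]] and a
# list of frames becomes one video. Guarded so it only runs when PIL is available.
if is_vision_available():
    _frame = PIL.Image.new("RGB", (8, 8))
    assert make_batched(_frame) == [[_frame]]
    assert make_batched([_frame, _frame]) == [[_frame, _frame]]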
| 359 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = '''MCTCTFeatureExtractor'''
    tokenizer_class = '''AutoTokenizer'''
    def __init__(self, feature_extractor, tokenizer ):
        super().__init__(feature_extractor, tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''', None )
        sampling_rate = kwargs.pop('''sampling_rate''', None )
        text = kwargs.pop('''text''', None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs )
        if text is not None:
            encodings = self.tokenizer(text, **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode(self, *args, **kwargs ):
        return self.tokenizer.batch_decode(*args, **kwargs )
    def pad(self, *args, **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs )
        input_features = kwargs.pop('''input_features''', None )
        labels = kwargs.pop('''labels''', None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['''labels'''] = labels['''input_ids''']
            return input_features
    def decode(self, *args, **kwargs ):
        return self.tokenizer.decode(*args, **kwargs )
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
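# Minimal standalone sketch of the context-manager swap implemented above: the
# "current processor" points at the tokenizer inside the block and reverts after.
from contextlib import contextmanager as _contextmanager

class _ProcessorSwap:
    def __init__(self):
        self.current = "feature_extractor"

    @_contextmanager
    def as_target(self):
        self.current = "tokenizer"
        yield
        self.current = "feature_extractor"

_swap = _ProcessorSwap()
with _swap.as_target():
    assert _swap.current == "tokenizer"
assert _swap.current == "feature_extractor"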
| 39 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowerCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase = StableUnCLIPImgaImgPipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
__lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__lowerCamelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__lowerCamelCase = frozenset([] )
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32 )
        torch.manual_seed(0 )
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
        # regular denoising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D"""), up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D"""), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="""projection""", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule="""scaled_linear""", beta_start=0.0_0085, beta_end=0.012, prediction_type="""v_prediction""", set_alpha_to_one=False, steps_offset=1, )
        torch.manual_seed(0 )
        vae = AutoencoderKL()
        components = {
            # image encoding components
            """feature_extractor""": feature_extractor,
            """image_encoder""": image_encoder.eval(),
            # image noising components
            """image_normalizer""": image_normalizer.eval(),
            """image_noising_scheduler""": image_noising_scheduler,
            # regular denoising components
            """tokenizer""": tokenizer,
            """text_encoder""": text_encoder.eval(),
            """unet""": unet.eval(),
            """scheduler""": scheduler,
            """vae""": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed ) ).to(device )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1 )
            input_image = input_image.cpu().permute(0, 2, 3, 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
    def test_image_embeds_none(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs.update({"""image_embeds""": None} )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["""cpu""", """mps"""]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["""cpu""", """mps"""]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available(), reason="""XFormers attention is only available with CUDA and `xformers` installed""", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            """fusing/stable-unclip-2-1-l-img2img""", torch_dtype=torch.floataa )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        output = pipe(init_image, """anime turtle""", generator=generator, output_type="""np""" )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image )
    def test_stable_unclip_h_img2img(self):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            """fusing/stable-unclip-2-1-h-img2img""", torch_dtype=torch.floataa )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        output = pipe(init_image, """anime turtle""", generator=generator, output_type="""np""" )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image )
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            """fusing/stable-unclip-2-1-h-img2img""", torch_dtype=torch.floataa )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            init_image, """anime turtle""", num_inference_steps=2, output_type="""np""", )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
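# assert_mean_pixel_difference (imported above) compares images by mean absolute
# pixel difference; a rough standalone sketch of that check (threshold assumed):
def _mean_pixel_difference_ok(image, expected_image, threshold=10):
    diff = np.abs(np.asarray(image, dtype=np.float32) - np.asarray(expected_image, dtype=np.float32))
    return diff.mean() < threshold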
| 82 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    """Rename original GLPN state-dict keys to the Hugging Face layout."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("""module.encoder""" ):
            key = key.replace("""module.encoder""", """glpn.encoder""" )
        if key.startswith("""module.decoder""" ):
            key = key.replace("""module.decoder""", """decoder.stages""" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
            key = key.replace(f'patch_embed{idx}', f'patch_embeddings.{int(idx)-1}' )
        if "norm" in key:
            key = key.replace("""norm""", """layer_norm""" )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
            key = key.replace(f'layer_norm{idx}', f'layer_norm.{int(idx)-1}' )
        if "layer_norm1" in key:
            key = key.replace("""layer_norm1""", """layer_norm_1""" )
        if "layer_norm2" in key:
            key = key.replace("""layer_norm2""", """layer_norm_2""" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("""block""" ) + len("""block""" )]
            key = key.replace(f'block{idx}', f'block.{int(idx)-1}' )
        if "attn.q" in key:
            key = key.replace("""attn.q""", """attention.self.query""" )
        if "attn.proj" in key:
            key = key.replace("""attn.proj""", """attention.output.dense""" )
        if "attn" in key:
            key = key.replace("""attn""", """attention.self""" )
        if "fc1" in key:
            key = key.replace("""fc1""", """dense1""" )
        if "fc2" in key:
            key = key.replace("""fc2""", """dense2""" )
        if "linear_pred" in key:
            key = key.replace("""linear_pred""", """classifier""" )
        if "linear_fuse" in key:
            key = key.replace("""linear_fuse.conv""", """linear_fuse""" )
            key = key.replace("""linear_fuse.bn""", """batch_norm""" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("""linear_c""" ) + len("""linear_c""" )]
            key = key.replace(f'linear_c{idx}', f'linear_c.{int(idx)-1}' )
        if "bot_conv" in key:
            key = key.replace("""bot_conv""", """0.convolution""" )
        if "skip_conv1" in key:
            key = key.replace("""skip_conv1""", """1.convolution""" )
        if "skip_conv2" in key:
            key = key.replace("""skip_conv2""", """2.convolution""" )
        if "fusion1" in key:
            key = key.replace("""fusion1""", """1.fusion""" )
        if "fusion2" in key:
            key = key.replace("""fusion2""", """2.fusion""" )
        if "fusion3" in key:
            key = key.replace("""fusion3""", """3.fusion""" )
        if "fusion" in key and "conv" in key:
            key = key.replace("""conv""", """convolutional_layer""" )
        if key.startswith("""module.last_layer_depth""" ):
            key = key.replace("""module.last_layer_depth""", """head.head""" )
        new_state_dict[key] = value
    return new_state_dict
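# Worked example for rename_keys above, on a single synthetic key:
# "module.encoder.patch_embed1.norm.weight" is mapped first through the
# module.encoder, patch_embed, and norm rules.
assert (
    list(rename_keys({"module.encoder.patch_embed1.norm.weight": 0}))
    == ["glpn.encoder.patch_embeddings.0.layer_norm.weight"]
)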
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
            kv_bias = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
            # next, add keys and values (in that order) to the state dict
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url, stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None ):
    config = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12], decoder_hidden_size=64, depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="""pt""" ).pixel_values
    logger.info("""Converting model...""" )
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("""cpu""" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
        else:
            raise ValueError(f'Unknown model name: {model_name}' )
        expected_shape = torch.Size([1, 4_80, 6_40] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1E-4 )
        print("""Looks ok!""" )
    # finally, push to hub if required
    if push_to_hub:
        logger.info("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name ), organization="""nielsr""", commit_message="""Add model""", use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name ), organization="""nielsr""", commit_message="""Add image processor""", use_temp_dir=True, )
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
A__ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
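# Example invocation of this conversion script (the script filename and local
# checkpoint path below are hypothetical):
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti \
#       --model_name glpn-kitti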
| 82 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowerCamelCase__ = "hf-internal-testing/tiny-random-bert"
lowerCamelCase__ = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
lowerCamelCase__ = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
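# The 500-response mock used above is a general pattern; a standalone sketch
# (helper name is ours) that builds the same fake object:
def _make_error_response_mock(status_code=500):
    response_mock = mock.Mock()
    response_mock.status_code = status_code
    response_mock.headers = {}
    response_mock.raise_for_status.side_effect = HTTPError
    response_mock.json.return_value = {}
    return response_mock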
| 364 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
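    # Note on the ids above: "\u0120low" is index 14 and "er" is index 15 in the toy
    # vocab from setUp, so "lower newer" maps to [14, 15, 10, 9, 3, 2, 15, 19] with
    # the trailing <unk> (index 19) appended.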
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
if not self.test_rust_tokenizer:
return
_UpperCamelCase : Any = self.get_tokenizer()
_UpperCamelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = "lower newer"
# Testing tokenization
_UpperCamelCase : str = tokenizer.tokenize(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
_UpperCamelCase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
_UpperCamelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : List[Any] = tokenizer.encode(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
_UpperCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_UpperCamelCase : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : int , *__a : int , **__a : List[Any] ) -> Union[str, Any]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int=15 ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
_UpperCamelCase : Optional[int] = "This is a simple input"
_UpperCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Dict = ("This is a simple input", "This is a pair")
_UpperCamelCase : Any = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
_UpperCamelCase : Union[str, Any] = "This is a simple input"
_UpperCamelCase : Optional[Any] = ["This is a simple input looooooooong", "This is a simple input"]
_UpperCamelCase : str = ("This is a simple input", "This is a pair")
_UpperCamelCase : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_UpperCamelCase : Union[str, Any] = tokenizer.pad_token_id
_UpperCamelCase : str = tokenizer(__a , padding="max_length" , max_length=30 , return_tensors="np" )
_UpperCamelCase : Tuple = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
_UpperCamelCase : str = tokenizer(*__a , padding="max_length" , max_length=60 , return_tensors="np" )
_UpperCamelCase : Optional[int] = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
_UpperCamelCase : Any = "$$$"
_UpperCamelCase : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
_UpperCamelCase : int = "This is a simple input"
_UpperCamelCase : Tuple = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Union[str, Any] = tokenizer.bos_token_id
_UpperCamelCase : str = tokenizer(__a )
_UpperCamelCase : Optional[Any] = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_UpperCamelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_UpperCamelCase : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
pass
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
# TODO: change to self.get_tokenizers() when the fast version is implemented
_UpperCamelCase : Optional[Any] = [self.get_tokenizer(do_lower_case=__a , add_bos_token=__a )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : Tuple = "Encode this."
_UpperCamelCase : List[str] = "This one too please."
_UpperCamelCase : Optional[int] = tokenizer.encode(__a , add_special_tokens=__a )
encoded_sequence += tokenizer.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer.encode_plus(
__a , __a , add_special_tokens=__a , return_special_tokens_mask=__a , )
_UpperCamelCase : str = encoded_sequence_dict["input_ids"]
_UpperCamelCase : Optional[int] = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(__a ) , len(__a ) )
_UpperCamelCase : Union[str, Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__a )
]
_UpperCamelCase : Union[str, Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(__a , __a )
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Any = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("test_opt" )
_UpperCamelCase : str = AutoTokenizer.from_pretrained("./test_opt" )
_UpperCamelCase : Optional[Any] = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
_UpperCamelCase : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Union[str, Any] = tokenizer.encode(
__a , )
# Same as above
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[str] = "bos"
_UpperCamelCase : Tuple = tokenizer.get_vocab()["bos"]
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : List[Any] = tokenizer.encode(
__a , )
# We changed the bos token
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("./tok" )
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
_UpperCamelCase : Tuple = tokenizer.encode(
__a , )
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
| 310 | 0 |
"""simple docstring"""
def print_pascal_triangle(num_rows):
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=' ')
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=' ')
            else:
                print(triangle[row_idx][col_idx], end='')
        print()


def generate_pascal_triangle(num_rows):
    if not isinstance(num_rows, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0')
    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle, current_row_idx):
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(triangle, current_row, current_row_idx, current_col_idx, ):
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows):
    if not isinstance(num_rows, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0')
    result = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark():
    from timeit import timeit

    def benchmark_a_function(func, value) -> None:
        call = f'''{func.__name__}({value})'''
        timing = timeit(f'''__main__.{call}''', setup='import __main__')
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f'''{call:38} -- {timing:.4f} seconds''')

    for value in range(1_5):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
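# Quick agreement check between the two implementations (runs at import time):
assert generate_pascal_triangle(4) == generate_pascal_triangle_optimized(4) == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]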
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 98 | """simple docstring"""
from PIL import Image
def change_contrast(img, level):
    factor = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))

    def contrast(c) -> int:
        return int(1_2_8 + factor * (c - 1_2_8))

    return img.point(contrast)
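# The same Image.point pattern handles other per-pixel adjustments, e.g. a
# hypothetical brightness shift (function name and clamping are ours):
def change_brightness(img, offset):
    return img.point(lambda c: max(0, min(255, c + offset)))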
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
lowerCAmelCase__ : Any = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 98 | 1 |
"""simple docstring"""
def depth_first_search(grid, row, col, visit):
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
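# --- Added worked example: simple paths from the top-left to the bottom-right
# of a 3x3 grid (1 marks a blocked cell); the two routes go around the wall.
# >>> depth_first_search([[0, 0, 0], [0, 1, 0], [0, 0, 0]], 0, 0, set())
# 2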
| 128 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
UpperCamelCase_ ="""examples/"""
UpperCamelCase_ ={
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
UpperCamelCase_ ={
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
UpperCamelCase_ ="""README.md"""
def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
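# --- Added illustration of the `packaging.version` properties the bump logic
# above relies on:
# >>> packaging.version.parse("4.30.0.dev0").is_devrelease
# True
# >>> packaging.version.parse("4.30.0.dev0").base_version
# '4.30.0'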
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 128 | 1 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
lowercase_ = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=snake_case )[0]
@deprecated(snake_case , '''Please use tf.data to implement this functionality.''' )
def a__ ( snake_case ):
"""simple docstring"""
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=snake_case ) as bytestream:
__SCREAMING_SNAKE_CASE : List[str] = _readaa(snake_case )
if magic != 2_051:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
__SCREAMING_SNAKE_CASE : str = _readaa(snake_case )
__SCREAMING_SNAKE_CASE : int = _readaa(snake_case )
__SCREAMING_SNAKE_CASE : Tuple = _readaa(snake_case )
__SCREAMING_SNAKE_CASE : List[str] = bytestream.read(rows * cols * num_images )
__SCREAMING_SNAKE_CASE : Union[str, Any] = numpy.frombuffer(snake_case , dtype=numpy.uinta )
__SCREAMING_SNAKE_CASE : Optional[Any] = data.reshape(snake_case , snake_case , snake_case , 1 )
return data
@deprecated(snake_case , '''Please use tf.one_hot on tensors.''' )
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = labels_dense.shape[0]
__SCREAMING_SNAKE_CASE : Any = numpy.arange(snake_case ) * num_classes
__SCREAMING_SNAKE_CASE : Union[str, Any] = numpy.zeros((num_labels, num_classes) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1
return labels_one_hot
@deprecated(snake_case , '''Please use tf.data to implement this functionality.''' )
def a__ ( snake_case , snake_case=False , snake_case=10 ):
"""simple docstring"""
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=snake_case ) as bytestream:
__SCREAMING_SNAKE_CASE : str = _readaa(snake_case )
if magic != 2_049:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = _readaa(snake_case )
__SCREAMING_SNAKE_CASE : Optional[int] = bytestream.read(snake_case )
__SCREAMING_SNAKE_CASE : str = numpy.frombuffer(snake_case , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(snake_case , snake_case )
return labels
class _DataSet:
"""simple docstring"""
@deprecated(
_A , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self : Optional[int] , _A : List[Any] , _A : List[str] , _A : List[Any]=False , _A : str=False , _A : int=dtypes.floataa , _A : Dict=True , _A : Dict=None , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = random_seed.get_seed(_A )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
__SCREAMING_SNAKE_CASE : str = dtypes.as_dtype(_A ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
__SCREAMING_SNAKE_CASE : Optional[int] = 1_0000
__SCREAMING_SNAKE_CASE : Tuple = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F'''images.shape: {images.shape} labels.shape: {labels.shape}'''
__SCREAMING_SNAKE_CASE : Any = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
__SCREAMING_SNAKE_CASE : Any = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
__SCREAMING_SNAKE_CASE : Tuple = images.astype(numpy.floataa )
__SCREAMING_SNAKE_CASE : List[Any] = numpy.multiply(_A , 1.0 / 2_55.0 )
__SCREAMING_SNAKE_CASE : int = images
__SCREAMING_SNAKE_CASE : List[str] = labels
__SCREAMING_SNAKE_CASE : Any = 0
__SCREAMING_SNAKE_CASE : int = 0
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
return self._images
@property
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
return self._labels
@property
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
return self._num_examples
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
return self._epochs_completed
def UpperCAmelCase__ ( self : Union[str, Any] , _A : List[Any] , _A : Optional[int]=False , _A : Dict=True ):
"""simple docstring"""
if fake_data:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [1] * 784
__SCREAMING_SNAKE_CASE : List[str] = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(_A )],
[fake_label for _ in range(_A )],
)
__SCREAMING_SNAKE_CASE : Optional[int] = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
__SCREAMING_SNAKE_CASE : Optional[int] = numpy.arange(self._num_examples )
numpy.random.shuffle(_A )
__SCREAMING_SNAKE_CASE : Dict = self.images[perma]
__SCREAMING_SNAKE_CASE : Any = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
__SCREAMING_SNAKE_CASE : Optional[int] = self._num_examples - start
__SCREAMING_SNAKE_CASE : List[str] = self._images[start : self._num_examples]
__SCREAMING_SNAKE_CASE : Dict = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
__SCREAMING_SNAKE_CASE : int = numpy.arange(self._num_examples )
numpy.random.shuffle(_A )
__SCREAMING_SNAKE_CASE : int = self.images[perm]
__SCREAMING_SNAKE_CASE : List[Any] = self.labels[perm]
# Start next epoch
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size - rest_num_examples
__SCREAMING_SNAKE_CASE : Optional[Any] = self._index_in_epoch
__SCREAMING_SNAKE_CASE : List[Any] = self._images[start:end]
__SCREAMING_SNAKE_CASE : List[Any] = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
__SCREAMING_SNAKE_CASE : List[Any] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(snake_case , '''Please write your own downloading logic.''' )
def a__ ( snake_case , snake_case , snake_case ):
"""simple docstring"""
if not gfile.Exists(snake_case ):
gfile.MakeDirs(snake_case )
__SCREAMING_SNAKE_CASE : Any = os.path.join(snake_case , snake_case )
if not gfile.Exists(snake_case ):
urllib.request.urlretrieve(snake_case , snake_case ) # noqa: S310
with gfile.GFile(snake_case ) as f:
__SCREAMING_SNAKE_CASE : Optional[Any] = f.size()
print('''Successfully downloaded''' , snake_case , snake_case , '''bytes.''' )
return filepath
@deprecated(
snake_case , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def a__ ( snake_case , snake_case=False , snake_case=False , snake_case=dtypes.floataa , snake_case=True , snake_case=5_000 , snake_case=None , snake_case=DEFAULT_SOURCE_URL , ):
"""simple docstring"""
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=snake_case , one_hot=snake_case , dtype=snake_case , seed=snake_case )
__SCREAMING_SNAKE_CASE : List[str] = fake()
__SCREAMING_SNAKE_CASE : List[str] = fake()
__SCREAMING_SNAKE_CASE : List[str] = fake()
return _Datasets(train=snake_case , validation=snake_case , test=snake_case )
if not source_url: # empty string check
__SCREAMING_SNAKE_CASE : int = DEFAULT_SOURCE_URL
__SCREAMING_SNAKE_CASE : int = '''train-images-idx3-ubyte.gz'''
__SCREAMING_SNAKE_CASE : int = '''train-labels-idx1-ubyte.gz'''
__SCREAMING_SNAKE_CASE : Tuple = '''t10k-images-idx3-ubyte.gz'''
__SCREAMING_SNAKE_CASE : List[Any] = '''t10k-labels-idx1-ubyte.gz'''
__SCREAMING_SNAKE_CASE : Optional[int] = _maybe_download(
snake_case , snake_case , source_url + train_images_file )
with gfile.Open(snake_case , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE : Any = _extract_images(snake_case )
__SCREAMING_SNAKE_CASE : int = _maybe_download(
snake_case , snake_case , source_url + train_labels_file )
with gfile.Open(snake_case , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE : List[str] = _extract_labels(snake_case , one_hot=snake_case )
__SCREAMING_SNAKE_CASE : str = _maybe_download(
snake_case , snake_case , source_url + test_images_file )
with gfile.Open(snake_case , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE : Optional[int] = _extract_images(snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = _maybe_download(
snake_case , snake_case , source_url + test_labels_file )
with gfile.Open(snake_case , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE : str = _extract_labels(snake_case , one_hot=snake_case )
if not 0 <= validation_size <= len(snake_case ):
__SCREAMING_SNAKE_CASE : Optional[int] = (
'''Validation size should be between 0 and '''
F'''{len(snake_case )}. Received: {validation_size}.'''
)
raise ValueError(snake_case )
__SCREAMING_SNAKE_CASE : Tuple = train_images[:validation_size]
__SCREAMING_SNAKE_CASE : Tuple = train_labels[:validation_size]
__SCREAMING_SNAKE_CASE : List[Any] = train_images[validation_size:]
__SCREAMING_SNAKE_CASE : Tuple = train_labels[validation_size:]
__SCREAMING_SNAKE_CASE : Dict = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
__SCREAMING_SNAKE_CASE : List[Any] = _DataSet(snake_case , snake_case , **snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = _DataSet(snake_case , snake_case , **snake_case )
__SCREAMING_SNAKE_CASE : List[str] = _DataSet(snake_case , snake_case , **snake_case )
return _Datasets(train=snake_case , validation=snake_case , test=snake_case )
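# --- Added usage sketch for the deprecated loader above (the local names are
# obfuscated; `read_data_sets` and `next_batch` are the upstream names, so
# treat them as assumptions here; first use downloads ~12 MB):
# >>> data = read_data_sets("/tmp/mnist", one_hot=True)
# >>> data.train.next_batch(32)[0].shape
# (32, 784)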
| 303 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
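# --- Added usage sketch for the re-exported SQuAD processors (class names are
# the upstream ones; the data directory is hypothetical):
# >>> processor = SquadV2Processor()
# >>> examples = processor.get_train_examples("path/to/squad", filename="train-v2.0.json")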
| 303 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = RobertaTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="replace" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , ) -> str:
super().__init__(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , errors=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , trim_offsets=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
A_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _SCREAMING_SNAKE_CASE ) != add_prefix_space:
A_ = getattr(_SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
A_ = add_prefix_space
A_ = pre_tok_class(**_SCREAMING_SNAKE_CASE )
A_ = add_prefix_space
A_ = '''post_processor'''
A_ = getattr(self.backend_tokenizer , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
A_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A_ = tuple(state['''sep'''] )
if "cls" in state:
A_ = tuple(state['''cls'''] )
A_ = False
if state.get('''add_prefix_space''' , _SCREAMING_SNAKE_CASE ) != add_prefix_space:
A_ = add_prefix_space
A_ = True
if state.get('''trim_offsets''' , _SCREAMING_SNAKE_CASE ) != trim_offsets:
A_ = trim_offsets
A_ = True
if changes_to_apply:
A_ = getattr(_SCREAMING_SNAKE_CASE , state.pop('''type''' ) )
A_ = component_class(**_SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@property
def __A ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __A ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
A_ = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else value
A_ = value
def __A ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> BatchEncoding:
A_ = kwargs.get('''is_split_into_words''' , _SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __A ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> BatchEncoding:
A_ = kwargs.get('''is_split_into_words''' , _SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
A_ = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
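# --- Added usage sketch (upstream this class is RobertaTokenizerFast; network
# access is assumed for the pretrained files):
# >>> tok = RobertaTokenizerFast.from_pretrained("roberta-base")
# >>> tok("Hello world")["input_ids"]
# [0, 31414, 232, 2]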
| 18 |
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    batch_size = np.inf
    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)
    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> int:
super().__init__(
_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE , streaming=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
A_ = path_or_paths if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else {self.split: path_or_paths}
A_ = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
A_ = Parquet(
cache_dir=_SCREAMING_SNAKE_CASE , data_files=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , hash=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def __A ( self ) -> str:
# Build iterable dataset
if self.streaming:
A_ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ = None
A_ = None
A_ = None
A_ = None
self.builder.download_and_prepare(
download_config=_SCREAMING_SNAKE_CASE , download_mode=_SCREAMING_SNAKE_CASE , verification_mode=_SCREAMING_SNAKE_CASE , base_path=_SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
A_ = self.builder.as_dataset(
split=self.split , verification_mode=_SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory )
return dataset
class ParquetDatasetWriter:
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> Dict:
A_ = dataset
A_ = path_or_buf
A_ = batch_size or get_writer_batch_size(dataset.features )
A_ = parquet_writer_kwargs
def __A ( self ) -> int:
A_ = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , '''wb+''' ) as buffer:
A_ = self._write(file_obj=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , **self.parquet_writer_kwargs )
else:
A_ = self._write(file_obj=self.path_or_buf , batch_size=_SCREAMING_SNAKE_CASE , **self.parquet_writer_kwargs )
return written
def __A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
A_ = 0
A_ = parquet_writer_kwargs.pop('''path_or_buf''' , _SCREAMING_SNAKE_CASE )
A_ = self.dataset.features.arrow_schema
A_ = pq.ParquetWriter(_SCREAMING_SNAKE_CASE , schema=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , _SCREAMING_SNAKE_CASE ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
A_ = query_table(
table=self.dataset._data , key=slice(_SCREAMING_SNAKE_CASE , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(_SCREAMING_SNAKE_CASE )
written += batch.nbytes
writer.close()
return written
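# --- Added usage sketch of the reader/writer pair above (these are the
# upstream `datasets` names; the output path is hypothetical):
# >>> ds = Dataset.from_dict({"a": [1, 2, 3]})
# >>> ParquetDatasetWriter(ds, "data.parquet").write()  # returns bytes written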
| 18 | 1 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}
    def add_edge(self, u_node, v_node, weight) -> None:
        self.m_edges.append([u_node, v_node, weight])
    def find_component(self, u_node) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])
    def set_component(self, u_node) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)
    def union(self, component_size, u_node, v_node) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)
    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")
def test_vector() -> None:
    """Placeholder kept from the original file (doctests were stripped)."""
if __name__ == "__main__":
import doctest
doctest.testmod()
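# --- Added worked example for the implementation above (defined as a function
# so nothing runs at import time; call it manually):
def demo_boruvka() -> None:
    g = Graph(4)
    g.add_edge(0, 1, 1)
    g.add_edge(1, 2, 2)
    g.add_edge(2, 3, 3)
    g.add_edge(0, 3, 4)
    g.boruvka()  # keeps the weight-1, -2 and -3 edges: total weight 6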
| 231 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    # Upstream (diffusers) the torch+torchsde dummy object is
    # DPMSolverSDEScheduler; the metaclass must be the imported DummyObject.
    _backends = ['torch', 'torchsde']
def __init__( self: int , *_SCREAMING_SNAKE_CASE: Optional[Any] , **_SCREAMING_SNAKE_CASE: str) -> Dict:
"""simple docstring"""
requires_backends(self , ["torch", "torchsde"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Optional[Any] , *_SCREAMING_SNAKE_CASE: Optional[int] , **_SCREAMING_SNAKE_CASE: Optional[int]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "torchsde"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Dict , *_SCREAMING_SNAKE_CASE: List[Any] , **_SCREAMING_SNAKE_CASE: Any) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["torch", "torchsde"]) | 269 | 0 |
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Show the bar only on the local main process (process index 0).
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
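# --- Added usage sketch: with this signature the iterable follows the flag;
# only local process 0 draws the bar under `accelerate launch`.
# >>> for _ in tqdm(True, range(10)):
# ...     pass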
| 43 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args, output_path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 43 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
    '''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_deberta_fast'''] = ['''DebertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_deberta'''] = [
'''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DebertaForMaskedLM''',
'''DebertaForQuestionAnswering''',
'''DebertaForSequenceClassification''',
'''DebertaForTokenClassification''',
'''DebertaModel''',
'''DebertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_deberta'''] = [
'''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDebertaForMaskedLM''',
'''TFDebertaForQuestionAnswering''',
'''TFDebertaForSequenceClassification''',
'''TFDebertaForTokenClassification''',
'''TFDebertaModel''',
'''TFDebertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
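# --- Added illustration: with the _LazyModule indirection above, importing
# the package is cheap; a heavy backend such as torch is only imported when
# one of its symbols is first accessed.
# >>> from transformers.models.deberta import DebertaConfig  # does not pull in torch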
| 159 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple('''covid_data''', '''cases deaths recovered''')
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    '''Scrape the three headline counters from worldometers.'''
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))
fmt = '''Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}'''
print(fmt.format(*covid_stats()))
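# --- Added illustration of the namedtuple returned above (dummy values):
# >>> covid_data(cases="1,000", deaths="10", recovered="900").cases
# '1,000'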
| 159 | 1 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=True, )
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ):
snake_case__ : Optional[Any] = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('distilbert-base-uncased')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased')
        input_ids = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 1_1, 7_6_8)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
| 354 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
    'feature_extraction_whisper': ['WhisperFeatureExtractor'],
    'processing_whisper': ['WhisperProcessor'],
    'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_whisper_fast'] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_whisper'] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_whisper'] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_whisper'] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 120 | 0 |
def wave(txt: str) -> list:
    """Return a 'wave' of the string: one copy per alphabetic position, with
    that position upper-cased (`wave` is the upstream name for this helper)."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('doctest').testmod()
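# --- Added worked example:
# >>> wave("cat")
# ['Cat', 'cAt', 'caT']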
| 110 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = KandinskyVaaImgaImgPipeline
_lowercase : Tuple = ['''image_embeds''', '''negative_image_embeds''', '''image''']
_lowercase : Any = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
_lowercase : Union[str, Any] = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_lowercase : Optional[Any] = False
@property
def lowerCamelCase_ ( self: Union[str, Any] ) -> Dict:
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
return self.time_input_dim
@property
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def lowerCamelCase_ ( self: int ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowercase__ = UNetaDConditionModel(**UpperCamelCase_ )
return model
@property
def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.dummy_unet
lowercase__ = self.dummy_movq
lowercase__ = {
'''num_train_timesteps''': 1_000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
lowercase__ = DDIMScheduler(**UpperCamelCase_ )
lowercase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int]=0 ) -> Optional[int]:
"""simple docstring"""
lowercase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
lowercase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase_ )
# create init_image
lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('''RGB''' ).resize((256, 256) )
if str(UpperCamelCase_ ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(UpperCamelCase_ )
else:
lowercase__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
lowercase__ = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def lowerCamelCase_ ( self: Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ = '''cpu'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**UpperCamelCase_ )
lowercase__ = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
lowercase__ = output.images
lowercase__ = pipe(
**self.get_dummy_inputs(UpperCamelCase_ ) , return_dict=UpperCamelCase_ , )[0]
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: str ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowercase__ = '''A red cartoon frog, 4k'''
lowercase__ = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase_ )
lowercase__ = KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
lowercase__ = pipeline.to(UpperCamelCase_ )
pipeline.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase__ , lowercase__ = pipe_prior(
UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
lowercase__ = pipeline(
image=UpperCamelCase_ , image_embeds=UpperCamelCase_ , negative_image_embeds=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
lowercase__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
| 110 | 1 |
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    """Return A(divisor): the length of the least repunit divisible by
    `divisor` (0 when the divisor shares a factor with 10)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution(limit: int = 100_0000) -> int:
    """Project Euler 129: least n for which A(n) first exceeds `limit`."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
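# --- Added worked check: 111111 (six ones) is the first repunit divisible
# by 7, so A(7) = 6.
# >>> least_divisible_repunit(7)
# 6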
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 270 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = DiTPipeline
lowerCAmelCase_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
lowerCAmelCase_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowerCAmelCase_ = False
def lowercase (self ) -> Union[str, Any]:
torch.manual_seed(0 )
_snake_case = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=UpperCAmelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=UpperCAmelCase , )
_snake_case = AutoencoderKL()
_snake_case = DDIMScheduler()
_snake_case = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def lowercase (self , UpperCAmelCase , UpperCAmelCase=0 ) -> List[str]:
if str(UpperCAmelCase ).startswith("""mps""" ):
_snake_case = torch.manual_seed(UpperCAmelCase )
else:
_snake_case = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
_snake_case = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def lowercase (self ) -> Union[str, Any]:
_snake_case = """cpu"""
_snake_case = self.get_dummy_components()
_snake_case = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
_snake_case = self.get_dummy_inputs(UpperCAmelCase )
_snake_case = pipe(**UpperCAmelCase ).images
_snake_case = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_snake_case = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_snake_case = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1e-3 )
def lowercase (self ) -> List[str]:
self._test_inference_batch_single_identical(relax_max_difference=UpperCAmelCase , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowercase (self ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")
        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)
        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")
        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1 | 270 | 1 |
'''simple docstring'''
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    """Sort `arr` by repeatedly extracting an already-ordered strand and merging it into `solution`."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
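# Illustrative trace: strand_sort([4, 3, 5, 1, 2]) first pulls the increasing strand [4, 5]
# (leaving [3, 1, 2]), which becomes the solution; the next pass pulls [3] and merges it to
# give [3, 4, 5]; the final pass pulls [1, 2], yielding [1, 2, 3, 4, 5].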
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1] | 206 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """Count the triangle words in words.txt (Project Euler problem 42)."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
| 130 | 0 |
def nand_gate(input_1: int, input_2: int) -> int:
    """Return the NAND of the two bit inputs."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """Exhaustively check the NAND truth table."""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
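# NAND is the negation of AND: the output is 1 unless both inputs are 1. The
# (input_1, input_2).count(0) != 0 expression simply tests whether at least one input is 0.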
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1)) | 362 | def solution() -> str:
    """Return the last ten digits of 1**1 + 2**2 + ... + 1000**1000 (Project Euler problem 48)."""
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]
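# Python integers have arbitrary precision, so `total` holds the full ~3001-digit sum exactly and
# the [-10:] slice is reliable. A sanity check in the same spirit:
#   str(sum(i**i for i in range(1, 11)))[-10:] == '0405071317'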
if __name__ == "__main__":
print(solution()) | 284 | 0 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class EncodecProcessor(ProcessorMixin):
    """Wraps an Encodec feature extractor and a T5 tokenizer into a single processor."""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
return audio_values
| 15 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output of the scheduler's step functions."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling scheduler in the style of Karras et al. (2022) for variance-expanding models."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 1_00,
        s_noise: float = 1.0_07,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, return_dict: bool = True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)

    def step_correct(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, sample_prev: torch.FloatTensor, derivative: torch.FloatTensor, return_dict: bool = True):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
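# Background (Karras et al., 2022): set_timesteps builds a geometric noise schedule,
# sigma_i = sigma_max * (sigma_min / sigma_max) ** (i / (N - 1)); add_noise_to_input applies the
# stochastic "churn" that lifts sigma to sigma_hat; step is an Euler step of the sampling ODE and
# step_correct adds the second-order (Heun-style) correction, hence order = 2.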
| 15 | 1 |
def binary_xor(a: int, b: int) -> str:
    """
    Return the binary XOR of two non-negative integers as a '0b'-prefixed string.

    >>> binary_xor(25, 32)
    '0b111001'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
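# Design note: Python computes the same thing directly on integers; the string version above just
# makes the bit-by-bit mechanics visible.
#   >>> bin(25 ^ 32)
#   '0b111001'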
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    """Print every subsequence of `sequence` using backtracking."""
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Explore the binary state-space tree: at each index, first exclude then include the element."""
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
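# Each element is either excluded (first recursive call) or included (second call), so a sequence
# of length n prints all 2**n subsequences; for [1, 2] the printed order is [], [2], [1], [1, 2].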
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
| 231 | 0 |
"""simple docstring"""
from manim import *
class A__ ( _lowerCamelCase):
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = Rectangle(height=0.5 , width=0.5 )
__lowerCAmelCase : str = Rectangle(height=0.25 , width=0.25 )
__lowerCAmelCase : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__lowerCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
__lowerCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
__lowerCAmelCase : List[Any] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
__lowerCAmelCase : str = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
__lowerCAmelCase : str = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
__lowerCAmelCase : int = Text('CPU' , font_size=24 )
__lowerCAmelCase : Optional[int] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = [mem.copy() for i in range(4 )]
__lowerCAmelCase : Dict = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
__lowerCAmelCase : int = Text('GPU' , font_size=24 )
__lowerCAmelCase : int = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
gpu.move_to([-1, -1, 0] )
self.add(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = [mem.copy() for i in range(6 )]
__lowerCAmelCase : Optional[Any] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
__lowerCAmelCase : str = Text('Model' , font_size=24 )
__lowerCAmelCase : List[str] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.add(_SCREAMING_SNAKE_CASE )
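        # At this point the scene holds three labelled hardware groups (CPU, GPU, Model); the rest
        # of construct() animates checkpoint shards moving between them and, later, a disk group.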
__lowerCAmelCase : str = []
__lowerCAmelCase : Dict = []
__lowerCAmelCase : List[str] = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
rect.set_stroke(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_SCREAMING_SNAKE_CASE )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
self.add(_SCREAMING_SNAKE_CASE )
model_cpu_arr.append(_SCREAMING_SNAKE_CASE )
self.add(*_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = [mem.copy() for i in range(6 )]
__lowerCAmelCase : Union[str, Any] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
__lowerCAmelCase : Optional[int] = Text('Loaded Checkpoint' , font_size=24 )
__lowerCAmelCase : Union[str, Any] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
checkpoint.move_to([3, 0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = []
__lowerCAmelCase : Optional[int] = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = fill.copy().set_fill(_SCREAMING_SNAKE_CASE , opacity=0.7 )
target.move_to(_SCREAMING_SNAKE_CASE )
ckpt_arr.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_SCREAMING_SNAKE_CASE )
self.add(*_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowerCAmelCase : Optional[int] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_SCREAMING_SNAKE_CASE , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
__lowerCAmelCase : List[Any] = [meta_mem.copy() for i in range(6 )]
__lowerCAmelCase : Any = [meta_mem.copy() for i in range(6 )]
__lowerCAmelCase : Any = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
__lowerCAmelCase : Optional[int] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
__lowerCAmelCase : Optional[Any] = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
__lowerCAmelCase : List[Any] = Text('Disk' , font_size=24 )
__lowerCAmelCase : Optional[Any] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE , run_time=3 ) , Write(_SCREAMING_SNAKE_CASE , run_time=1 ) , Create(_SCREAMING_SNAKE_CASE , run_time=1 ) )
__lowerCAmelCase : List[Any] = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_SCREAMING_SNAKE_CASE , run_time=1.5 ) )
self.play(*_SCREAMING_SNAKE_CASE )
self.play(FadeOut(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Union[str, Any] = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE , run_time=3 ) )
self.play(
FadeOut(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) , )
self.wait() | 86 |
"""simple docstring"""
def normality(moles: float, volume: float, nfactor: int) -> float:
    # normality = molarity * n-factor = (moles / volume) * n-factor
    return round(float(moles / volume) * nfactor)


def pressure_of_gas_system(moles: float, temperature: float, volume: float) -> float:
    # ideal gas law, P = nRT / V with R = 0.0821 L*atm/(mol*K)
    return round(float((moles * 0.0821 * temperature) / (volume)))


def volume_of_gas_system(moles: float, temperature: float, pressure: float) -> float:
    # ideal gas law, V = nRT / P
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def temperature_of_gas_system(moles: float, pressure: float, volume: float) -> float:
    # ideal gas law, T = PV / (nR)
    return round(float((pressure * volume) / (0.0821 * moles)))
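# Worked example (ideal gas law with R = 0.0821 L*atm/(mol*K)):
#   pressure_of_gas_system(2, 300, 10) -> round(2 * 0.0821 * 300 / 10) = round(4.926) = 5 atm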
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
) | 135 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] ) | 135 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )
| 205 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['data_utils'] = data_utils
sys.modules['vocabulary'] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, 'rb') as fp:
            corpus = pickle.load(fp, encoding='latin1')
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
        print(F'''Save vocabulary to {pytorch_vocab_dump_path}''')
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab', None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
        print(F'''Save dataset to {pytorch_dataset_dump_path}''')
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''')
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(F'''Building PyTorch model from configuration: {config}''')
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(F'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}''')
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(F'''Save configuration file to {os.path.abspath(pytorch_config_dump_path)}''')
        with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 205 | 1 |
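# The eight lists below correspond to the predefined DeepFloyd IF denoising schedules
# (cf. diffusers' pipelines/deepfloyd_if/timesteps.py): "fast", "smart", and "super" variants
# with 27/40/50/100/185 steps, each running from 999 down to 0.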
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 363 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element of the list."""
    return choice(lst)


def kth_number(lst, k):
    """Return the kth smallest element of lst (1-indexed) via randomized quickselect."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
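# Quickselect runs in expected O(n) time (worst case O(n**2) with unlucky pivots). Note the
# partition drops elements equal to the pivot, so inputs are assumed distinct, e.g.
# kth_number([3, 2, 1, 5, 4], 2) == 2.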
if __name__ == "__main__":
import doctest
doctest.testmod()
| 244 | 0 |
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(R"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0, help="Predict \"\" if no-answer probability exceeds this (default = 1.0).", )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory.")
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
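# Worked example (not from the original script): gold "brown cat sat" vs. prediction "cat sat"
# share two tokens, so precision = 2/2 = 1.0, recall = 2/3, and
# f1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.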
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ] )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ] )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score", )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score", )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)", )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
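# The sweep starts from the score of answering nothing (num_no_ans questions correct), then
# admits predictions in order of increasing no-answer probability: each admitted answerable
# question adds its score, while admitting a non-empty prediction for an unanswerable one
# costs a point.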
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| 35 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print("       ...       ....      nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(R"| /\ | |- |  |-  |--| |\  /| |-")
    print(R"|/ \| |-  |_  |_  |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
| 35 | 1 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint
    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight")
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias")
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        ' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml')
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if checkpoint_path.endswith('safetensors'):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework='pt', device='cpu') as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)['state_dict']

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
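# Example invocation (hypothetical paths):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers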
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 359 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 94 | 0 |
def logical_left_shift(number: int, shift_amount: int) -> str:
    """
    Shift the binary representation of a positive integer left by `shift_amount` bits.

    >>> logical_left_shift(1, 1)
    '0b10'
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift the binary representation right, filling with zeros."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift the two's-complement binary representation right, replicating the sign bit."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
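# In the arithmetic shift the sign bit is replicated on the left, so negatives stay negative:
# arithmetic_right_shift(-8, 2) encodes -8 as the 5-bit two's complement "11000" and returns
# "0b11110", i.e. -2 in 5-bit two's complement, matching Python's -8 >> 2.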
if __name__ == "__main__":
import doctest
doctest.testmod()
| 248 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
datefmt="""%Y-%m-%d %H:%M:%S""",
level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"""facebook/bart-base""": BartForConditionalGeneration}
tokenizer_dict = {"""facebook/bart-base""": BartTokenizer}
def parse_args():
    """Parse the command-line arguments for the export script."""
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1_0_2_4, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=1_4,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def _UpperCAmelCase ( ):
'''simple docstring'''
a_ : List[str] = parse_args()
a_ : str = 5
a_ : Union[str, Any] = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO)
transformers.utils.logging.set_verbosity_error()
a_ : int = torch.device(args.device)
a_ , a_ : Optional[Any] = load_model_tokenizer(args.model_name_or_path , a__)
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""")
model.to(a__)
if args.max_length:
a_ : List[str] = args.max_length
if args.num_beams:
a_ : Optional[Any] = args.num_beams
if args.output_file_path:
a_ : Optional[int] = args.output_file_path
else:
a_ : Tuple = """BART.onnx"""
logger.info("""Exporting model to ONNX""")
export_and_validate_model(a__ , a__ , a__ , a__ , a__)
if __name__ == "__main__":
main()
| 248 | 1 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowerCamelCase__ :
'''simple docstring'''
@staticmethod
def UpperCamelCase__ ( *lowerCamelCase_ ,**lowerCamelCase_ ) -> int:
pass
def _A ( _a : Image ):
"""simple docstring"""
    A = hashlib.md5(image.tobytes() )
return m.hexdigest()[:1_0]
def _A ( _a : Image ):
"""simple docstring"""
A = np.array(_a )
A = npimg.shape
return {"hash": hashimage(_a ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
_lowerCamelCase = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> Tuple:
A = MaskGenerationPipeline(model=lowerCamelCase_ ,image_processor=lowerCamelCase_ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> Optional[int]:
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def UpperCamelCase__ ( self ) -> Optional[Any]:
pass
@slow
@require_torch
def UpperCamelCase__ ( self ) -> Optional[int]:
A = pipeline("""mask-generation""" ,model="""facebook/sam-vit-huge""" )
A = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" ,points_per_batch=2_5_6 )
# Shortening by hashing
A = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase_ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(lowerCamelCase_ ,decimals=4 ) ,[
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.04_44},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_21},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.01_67},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.01_32},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.00_53},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.99_67},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_93},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.99_09},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.98_79},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.98_34},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.97_16},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.96_12},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.95_99},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.95_52},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.95_32},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.95_16},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.94_99},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.94_83},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.94_64},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_43},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_43},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.94_08},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.93_35},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.93_26},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.92_62},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.89_99},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.89_86},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.89_84},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.88_73},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.88_71}
] ,)
# fmt: on
@require_torch
@slow
def UpperCamelCase__ ( self ) -> str:
A = """facebook/sam-vit-huge"""
A = pipeline("""mask-generation""" ,model=lowerCamelCase_ )
A = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" ,pred_iou_thresh=1 ,points_per_batch=2_5_6 )
# Shortening by hashing
A = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase_ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(lowerCamelCase_ ,decimals=4 ) ,[
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.04_44},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.02_10},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.01_67},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.01_32},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.00_53},
] ,)
| 77 |
"""simple docstring"""
def _A ( ):
"""simple docstring"""
return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )]
UpperCAmelCase =generate_large_matrix()
UpperCAmelCase =(
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def _A ( _a : list[list[int]] ):
"""simple docstring"""
assert all(row == sorted(_a , reverse=_a ) for row in grid )
assert all(list(_a ) == sorted(_a , reverse=_a ) for col in zip(*_a ) )
def _A ( _a : list[int] ):
"""simple docstring"""
A = 0
A = len(_a ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
A = (left + right) // 2
A = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
A = mid + 1
else:
A = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(_a )
def _A ( _a : list[list[int]] ):
"""simple docstring"""
A = 0
A = len(grid[0] )
for i in range(len(_a ) ):
A = find_negative_index(grid[i][:bound] )
total += bound
return (len(_a ) * len(grid[0] )) - total
def _A ( _a : list[list[int]] ):
"""simple docstring"""
return len([number for row in grid for number in row if number < 0] )
def _A ( _a : list[list[int]] ):
"""simple docstring"""
A = 0
for row in grid:
for i, number in enumerate(_a ):
if number < 0:
total += len(_a ) - i
break
return total
def _A ( ):
"""simple docstring"""
from timeit import timeit
print("""Running benchmarks""" )
A = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
A = timeit(f'{func}(grid=grid)' , setup=_a , number=5_0_0 )
print(f'{func}() took {time:0.4f} seconds' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 77 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Union[List[PIL.Image.Image], np.ndarray]
lowerCAmelCase__ : Optional[List[bool]]
lowerCAmelCase__ : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 325 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Tuple = "mask2former"
lowerCAmelCase__ : List[Any] = ["swin"]
lowerCAmelCase__ : str = {"hidden_size": "hidden_dim"}
def __init__( self : Optional[int] , _UpperCAmelCase : Optional[Dict] = None , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 10_24 , _UpperCAmelCase : str = "relu" , _UpperCAmelCase : int = 6 , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 8 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : int = 20_48 , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 4 , _UpperCAmelCase : int = 2_55 , _UpperCAmelCase : int = 1_00 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 2.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : int = 1_25_44 , _UpperCAmelCase : float = 3.0 , _UpperCAmelCase : float = 0.75 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : bool = True , _UpperCAmelCase : List[int] = [4, 8, 16, 32] , _UpperCAmelCase : bool = None , **_UpperCAmelCase : List[str] , ) -> int:
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__lowercase = CONFIG_MAPPING['swin'](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_UpperCAmelCase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = backbone_config.pop('model_type' )
__lowercase = CONFIG_MAPPING[backbone_model_type]
__lowercase = config_class.from_dict(_UpperCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
__lowercase = backbone_config
__lowercase = feature_size
__lowercase = mask_feature_size
__lowercase = hidden_dim
__lowercase = encoder_feedforward_dim
__lowercase = activation_function
__lowercase = encoder_layers
__lowercase = decoder_layers
__lowercase = num_attention_heads
__lowercase = dropout
__lowercase = dim_feedforward
__lowercase = pre_norm
__lowercase = enforce_input_projection
__lowercase = common_stride
__lowercase = ignore_value
__lowercase = num_queries
__lowercase = no_object_weight
__lowercase = class_weight
__lowercase = mask_weight
__lowercase = dice_weight
__lowercase = train_num_points
__lowercase = oversample_ratio
__lowercase = importance_sample_ratio
__lowercase = init_std
__lowercase = init_xavier_std
__lowercase = use_auxiliary_loss
__lowercase = feature_strides
__lowercase = output_auxiliary_logits
__lowercase = decoder_layers
super().__init__(**_UpperCAmelCase )
@classmethod
def a__ ( cls : Union[str, Any] , _UpperCAmelCase : PretrainedConfig , **_UpperCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
return cls(
backbone_config=_UpperCAmelCase , **_UpperCAmelCase , )
def a__ ( self : str ) -> Dict[str, any]:
"""simple docstring"""
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.backbone_config.to_dict()
__lowercase = self.__class__.model_type
return output
| 325 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['''CLIPFeatureExtractor''']
__lowerCAmelCase = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 288 |
import torch
from diffusers import StableDiffusionPipeline
__lowerCAmelCase = '''path-to-your-trained-model'''
__lowerCAmelCase = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('''cuda''')
__lowerCAmelCase = '''A photo of sks dog in a bucket'''
__lowerCAmelCase = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 288 | 1 |
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if not isinstance(a__ , a__ ):
raise ValueError('''multiplicative_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''multiplicative_persistence() does not accept negative values''' )
SCREAMING_SNAKE_CASE : Any = 0
SCREAMING_SNAKE_CASE : Optional[int] = str(a__ )
while len(a__ ) != 1:
SCREAMING_SNAKE_CASE : str = [int(a__ ) for i in num_string]
SCREAMING_SNAKE_CASE : str = 1
for i in range(0 , len(a__ ) ):
total *= numbers[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = str(a__ )
steps += 1
return steps
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if not isinstance(a__ , a__ ):
raise ValueError('''additive_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''additive_persistence() does not accept negative values''' )
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : Tuple = str(a__ )
while len(a__ ) != 1:
SCREAMING_SNAKE_CASE : Dict = [int(a__ ) for i in num_string]
SCREAMING_SNAKE_CASE : Optional[Any] = 0
for i in range(0 , len(a__ ) ):
total += numbers[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = str(a__ )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 313 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a__ : Dict = logging.get_logger(__name__)
a__ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a__ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
a__ : Optional[int] = {
'''allenai/led-base-16384''': 16_384,
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Union[str, Any] = LEDTokenizer
__SCREAMING_SNAKE_CASE : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ) ->Union[str, Any]:
super().__init__(
_lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : str = getattr(_lowerCamelCase , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space
SCREAMING_SNAKE_CASE : str = pre_tok_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE : List[Any] = '''post_processor'''
SCREAMING_SNAKE_CASE : int = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE : Any = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE : Optional[int] = tuple(state['''sep'''] )
if "cls" in state:
SCREAMING_SNAKE_CASE : Optional[Any] = tuple(state['''cls'''] )
SCREAMING_SNAKE_CASE : Any = False
if state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if state.get('''trim_offsets''' , _lowerCamelCase ) != trim_offsets:
SCREAMING_SNAKE_CASE : List[Any] = trim_offsets
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if changes_to_apply:
SCREAMING_SNAKE_CASE : List[str] = getattr(_lowerCamelCase , state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : List[Any] = component_class(**_lowerCamelCase )
setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __lowerCAmelCase ( self ) ->str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value
SCREAMING_SNAKE_CASE : List[Any] = value
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : List[Any] = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
SCREAMING_SNAKE_CASE : Any = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->List[int]:
SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = PaddingStrategy.DO_NOT_PAD , _lowerCamelCase = None , _lowerCamelCase = None , ) ->dict:
SCREAMING_SNAKE_CASE : Tuple = super()._pad(
encoded_inputs=_lowerCamelCase , max_length=_lowerCamelCase , padding_strategy=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE : Optional[Any] = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE : int = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE : Tuple = len(encoded_inputs['''global_attention_mask'''] ) != len(_lowerCamelCase )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE : int = len(_lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE : str = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE : Optional[Any] = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 313 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __UpperCAmelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=7 , lowerCAmelCase_=3 , lowerCAmelCase_=18 , lowerCAmelCase_=30 , lowerCAmelCase_=4_00 , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=True , ):
"""simple docstring"""
_snake_case = size if size is not None else {'shortest_edge': 20}
_snake_case = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_snake_case = parent
_snake_case = batch_size
_snake_case = num_channels
_snake_case = image_size
_snake_case = min_resolution
_snake_case = max_resolution
_snake_case = do_resize
_snake_case = size
_snake_case = do_center_crop
_snake_case = crop_size
_snake_case = do_flip_channel_order
def lowerCamelCase ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class __UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
__lowercase = MobileViTImageProcessor if is_vision_available() else None
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = MobileViTImageProcessingTester(self )
@property
def lowerCamelCase ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'size' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_center_crop' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'center_crop' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'do_flip_channel_order' ) )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_snake_case = image_processing(lowerCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_snake_case = image_processing(lowerCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_snake_case = image_processing(lowerCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 160 |
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
lowercase : int = NewType("DataClass", Any)
lowercase : Dict = NewType("DataClassType", Any)
def SCREAMING_SNAKE_CASE__ ( __A ) -> Optional[Any]:
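    # argparse's type=bool would treat any non-empty string as True, so common truthy/falsy strings are mapped explicitly.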
if isinstance(__A , __A ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
def SCREAMING_SNAKE_CASE__ ( __A ) -> Callable[[str], Any]:
_snake_case = {str(__A ): choice for choice in choices}
return lambda __A : str_to_choice.get(__A , __A )
def SCREAMING_SNAKE_CASE__ ( *,
__A = None , __A = None , __A = dataclasses.MISSING , __A = dataclasses.MISSING , __A = None , **__A , ) -> dataclasses.Field:
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
_snake_case = {}
if aliases is not None:
_snake_case = aliases
if help is not None:
_snake_case = help
return dataclasses.field(metadata=__A , default=__A , default_factory=__A , **__A )
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = 42
def __init__( self , lowerCAmelCase_ , **lowerCAmelCase_ ):
"""simple docstring"""
if "formatter_class" not in kwargs:
_snake_case = ArgumentDefaultsHelpFormatter
super().__init__(**lowerCAmelCase_ )
if dataclasses.is_dataclass(lowerCAmelCase_ ):
_snake_case = [dataclass_types]
_snake_case = list(lowerCAmelCase_ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(lowerCAmelCase_ )
@staticmethod
def lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = F'--{field.name}'
_snake_case = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , lowerCAmelCase_ ):
raise RuntimeError(
'Unresolved type detected, which should have been done with the help of '
'`typing.get_type_hints` method by default' )
_snake_case = kwargs.pop('aliases' , [] )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = [aliases]
_snake_case = getattr(field.type , '__origin__' , field.type )
if origin_type is Union or (hasattr(lowerCAmelCase_ , 'UnionType' ) and isinstance(lowerCAmelCase_ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(lowerCAmelCase_ ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
F' Problem encountered in field \'{field.name}\'.' )
if type(lowerCAmelCase_ ) not in field.type.__args__:
# filter `str` in Union
_snake_case = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
_snake_case = getattr(field.type , '__origin__' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
_snake_case = (
field.type.__args__[0] if isinstance(lowerCAmelCase_ , field.type.__args__[1] ) else field.type.__args__[1]
)
_snake_case = getattr(field.type , '__origin__' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
_snake_case = {}
if origin_type is Literal or (isinstance(field.type , lowerCAmelCase_ ) and issubclass(field.type , lowerCAmelCase_ )):
if origin_type is Literal:
_snake_case = field.type.__args__
else:
_snake_case = [x.value for x in field.type]
_snake_case = make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
_snake_case = field.default
else:
_snake_case = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
_snake_case = copy(lowerCAmelCase_ )
# Hack because type=bool in argparse does not behave as we want.
_snake_case = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
_snake_case = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
_snake_case = default
# This tells argparse we accept 0 or 1 value after --field_name
_snake_case = '?'
# This is the value that will get picked if we do --field_name (without value)
_snake_case = True
elif isclass(lowerCAmelCase_ ) and issubclass(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = field.type.__args__[0]
_snake_case = '+'
if field.default_factory is not dataclasses.MISSING:
_snake_case = field.default_factory()
elif field.default is dataclasses.MISSING:
_snake_case = True
else:
_snake_case = field.type
if field.default is not dataclasses.MISSING:
_snake_case = field.default
elif field.default_factory is not dataclasses.MISSING:
_snake_case = field.default_factory()
else:
_snake_case = True
parser.add_argument(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
_snake_case = False
parser.add_argument(F'--no_{field.name}' , action='store_false' , dest=field.name , **lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
if hasattr(lowerCAmelCase_ , '_argument_group_name' ):
_snake_case = self.add_argument_group(dtype._argument_group_name )
else:
_snake_case = self
try:
_snake_case = get_type_hints(lowerCAmelCase_ )
except NameError:
raise RuntimeError(
F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
'removing line of `from __future__ import annotations` which opts in Postponed '
'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(lowerCAmelCase_ ):
_snake_case = '.'.join(map(lowerCAmelCase_ , sys.version_info[:3] ) )
raise RuntimeError(
F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
'line of `from __future__ import annotations` which opts in union types as '
'`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
'support Python versions that lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(lowerCAmelCase_ ):
if not field.init:
continue
_snake_case = type_hints[field.name]
self._parse_dataclass_field(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=None , ):
"""simple docstring"""
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
_snake_case = []
if args_filename:
args_files.append(Path(lowerCAmelCase_ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
_snake_case = ArgumentParser()
args_file_parser.add_argument(lowerCAmelCase_ , type=lowerCAmelCase_ , action='append' )
# Use only remaining args for further parsing (remove the args_file_flag)
_snake_case , _snake_case = args_file_parser.parse_known_args(args=lowerCAmelCase_ )
_snake_case = vars(lowerCAmelCase_ ).get(args_file_flag.lstrip('-' ) , lowerCAmelCase_ )
if cmd_args_file_paths:
args_files.extend([Path(lowerCAmelCase_ ) for p in cmd_args_file_paths] )
_snake_case = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
_snake_case = file_args + args if args is not None else file_args + sys.argv[1:]
_snake_case , _snake_case = self.parse_known_args(args=lowerCAmelCase_ )
_snake_case = []
for dtype in self.dataclass_types:
_snake_case = {f.name for f in dataclasses.fields(lowerCAmelCase_ ) if f.init}
_snake_case = {k: v for k, v in vars(lowerCAmelCase_ ).items() if k in keys}
for k in keys:
delattr(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = dtype(**lowerCAmelCase_ )
outputs.append(lowerCAmelCase_ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(lowerCAmelCase_ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
return (*outputs,)
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False ):
"""simple docstring"""
_snake_case = set(args.keys() )
_snake_case = []
for dtype in self.dataclass_types:
_snake_case = {f.name for f in dataclasses.fields(lowerCAmelCase_ ) if f.init}
_snake_case = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
_snake_case = dtype(**lowerCAmelCase_ )
outputs.append(lowerCAmelCase_ )
if not allow_extra_keys and unused_keys:
raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(lowerCAmelCase_ )}' )
return tuple(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False ):
"""simple docstring"""
with open(Path(lowerCAmelCase_ ) , encoding='utf-8' ) as open_json_file:
_snake_case = json.loads(open_json_file.read() )
_snake_case = self.parse_dict(lowerCAmelCase_ , allow_extra_keys=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False ):
"""simple docstring"""
_snake_case = self.parse_dict(yaml.safe_load(Path(lowerCAmelCase_ ).read_text() ) , allow_extra_keys=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
| 160 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Union[str, Any] =logging.get_logger(__name__)
_UpperCAmelCase : Dict ="▁"
_UpperCAmelCase : Optional[Any] ={"vocab_file": "sentencepiece.bpe.model"}
_UpperCAmelCase : Tuple ={
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
_UpperCAmelCase : Optional[Any] ={
"facebook/xglm-564M": 2048,
}
class snake_case__( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : int = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Dict = ['input_ids', 'attention_mask']
def __init__( self , __lowercase , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase = None , **__lowercase , ) -> None:
lowerCAmelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowerCAmelCase_ : List[Any] = 7
lowerCAmelCase_ : List[Any] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
lowerCAmelCase_ : List[Any] = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
lowerCAmelCase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
lowerCAmelCase_ : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase_ : Optional[Any] = 1
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase_ : List[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowerCAmelCase_ : int = len(self.sp_model )
lowerCAmelCase_ : Union[str, Any] = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(__UpperCAmelCase )
lowerCAmelCase_ : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Dict:
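        # Drop the unpicklable SentencePiece processor before pickling; keep only its serialized proto for __setstate__.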
lowerCAmelCase_ : str = self.__dict__.copy()
lowerCAmelCase_ : str = None
lowerCAmelCase_ : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __lowercase ) -> List[str]:
lowerCAmelCase_ : List[str] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
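        # fairseq-style formatting: a single leading separator token, and a double separator between sentence pairs.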
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowerCAmelCase_ : Tuple = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def lowercase_ ( self , __lowercase , __lowercase = None , __lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase ))
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase ))
def lowercase_ ( self , __lowercase , __lowercase = None ) -> List[int]:
lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def lowercase_ ( self ) -> str:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def lowercase_ ( self ) -> str:
lowerCAmelCase_ : Union[str, Any] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase_ ( self , __lowercase ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def lowercase_ ( self , __lowercase ) -> List[str]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase_ : Tuple = self.sp_model.PieceToId(__UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase_ ( self , __lowercase ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase_ ( self , __lowercase ) -> Any:
lowerCAmelCase_ : List[Any] = ''''''.join(__UpperCAmelCase ).replace(__UpperCAmelCase , ''' ''' ).strip()
return out_string
def lowercase_ ( self , __lowercase , __lowercase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase_ : int = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , '''wb''' ) as fi:
lowerCAmelCase_ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
        return (out_vocab_file,)
| 262 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A_ : Optional[Any] = logging.get_logger(__name__)
A_ : Optional[Any] = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class lowerCamelCase (A__ ):
lowerCamelCase__ : Dict = 'detr'
lowerCamelCase__ : Union[str, Any] = ['past_key_values']
lowerCamelCase__ : Tuple = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Optional[int] , __UpperCAmelCase : Any=True , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Optional[int]=3 , __UpperCAmelCase : int=1_0_0 , __UpperCAmelCase : Optional[Any]=6 , __UpperCAmelCase : List[str]=2_0_4_8 , __UpperCAmelCase : List[str]=8 , __UpperCAmelCase : Optional[int]=6 , __UpperCAmelCase : Dict=2_0_4_8 , __UpperCAmelCase : List[str]=8 , __UpperCAmelCase : Union[str, Any]=0.0 , __UpperCAmelCase : Dict=0.0 , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : Any="relu" , __UpperCAmelCase : Dict=2_5_6 , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : int=0.0 , __UpperCAmelCase : str=0.0 , __UpperCAmelCase : Optional[int]=0.02 , __UpperCAmelCase : Optional[int]=1.0 , __UpperCAmelCase : Dict=False , __UpperCAmelCase : str="sine" , __UpperCAmelCase : str="resnet50" , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : int=False , __UpperCAmelCase : Tuple=1 , __UpperCAmelCase : Optional[Any]=5 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[Any]=1 , __UpperCAmelCase : Union[str, Any]=1 , __UpperCAmelCase : Union[str, Any]=5 , __UpperCAmelCase : Any=2 , __UpperCAmelCase : List[str]=0.1 , **__UpperCAmelCase : Dict , ) -> Optional[int]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
SCREAMING_SNAKE_CASE__ = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = backbone_config.get("""model_type""" )
SCREAMING_SNAKE_CASE__ = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE__ = config_class.from_dict(__UpperCAmelCase )
# set timm attributes to None
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None, None, None
SCREAMING_SNAKE_CASE__ = use_timm_backbone
SCREAMING_SNAKE_CASE__ = backbone_config
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = num_queries
SCREAMING_SNAKE_CASE__ = d_model
SCREAMING_SNAKE_CASE__ = encoder_ffn_dim
SCREAMING_SNAKE_CASE__ = encoder_layers
SCREAMING_SNAKE_CASE__ = encoder_attention_heads
SCREAMING_SNAKE_CASE__ = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ = decoder_layers
SCREAMING_SNAKE_CASE__ = decoder_attention_heads
SCREAMING_SNAKE_CASE__ = dropout
SCREAMING_SNAKE_CASE__ = attention_dropout
SCREAMING_SNAKE_CASE__ = activation_dropout
SCREAMING_SNAKE_CASE__ = activation_function
SCREAMING_SNAKE_CASE__ = init_std
SCREAMING_SNAKE_CASE__ = init_xavier_std
SCREAMING_SNAKE_CASE__ = encoder_layerdrop
SCREAMING_SNAKE_CASE__ = decoder_layerdrop
SCREAMING_SNAKE_CASE__ = encoder_layers
SCREAMING_SNAKE_CASE__ = auxiliary_loss
SCREAMING_SNAKE_CASE__ = position_embedding_type
SCREAMING_SNAKE_CASE__ = backbone
SCREAMING_SNAKE_CASE__ = use_pretrained_backbone
SCREAMING_SNAKE_CASE__ = dilation
# Hungarian matcher
SCREAMING_SNAKE_CASE__ = class_cost
SCREAMING_SNAKE_CASE__ = bbox_cost
SCREAMING_SNAKE_CASE__ = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE__ = mask_loss_coefficient
SCREAMING_SNAKE_CASE__ = dice_loss_coefficient
SCREAMING_SNAKE_CASE__ = bbox_loss_coefficient
SCREAMING_SNAKE_CASE__ = giou_loss_coefficient
SCREAMING_SNAKE_CASE__ = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase )
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return self.d_model
@classmethod
def SCREAMING_SNAKE_CASE ( cls : str , __UpperCAmelCase : PretrainedConfig , **__UpperCAmelCase : Dict ) -> List[Any]:
return cls(backbone_config=__UpperCAmelCase , **__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict[str, any]:
SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
SCREAMING_SNAKE_CASE__ = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE__ = self.__class__.model_type
return output
class lowerCamelCase (A__ ):
lowerCamelCase__ : Union[str, Any] = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> float:
return 1e-5
@property
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return 1_2
| 165 | 0 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    # Greedy fractional knapsack: take items in decreasing value/weight ratio.
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
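    # Quick sanity check of the greedy routine above; the item values, weights
    # and capacity are made-up illustration data, not part of the original file.
    values = [60, 100, 120]
    weights = [10, 20, 30]
    best, fractions = fractional_knapsack(values, weights, capacity=50)
    print(best)       # 240.0: items 1 and 2 taken whole, two thirds of item 3
    print(fractions)  # [1, 1, 0.6666666666666666]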
| 287 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
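# Minimal usage sketch (illustration only, not part of the original file):
# unspecified arguments keep the BioGPT defaults shown above.
#
#     config = BioGptConfig(num_hidden_layers=6, hidden_size=512)
#     config.num_attention_heads  # -> 16 (default preserved)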
| 287 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    # Builds the sample tree:  1 / (2 / (4, 5), 3)
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    # ZigZag traverse: alternate left-to-right and right-to-left per level.
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()

    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")

    print(f"Height of Tree: {height(tree)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(tree))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
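# Worked example (added for illustration): for the 5-node sample tree built by
# make_tree, the traversals above produce the following sequences.
def _traversal_smoke_test() -> None:
    tree = make_tree()
    assert inorder(tree) == [4, 2, 5, 1, 3]
    assert preorder(tree) == [1, 2, 4, 5, 3]
    assert postorder(tree) == [4, 5, 2, 3, 1]
    assert zigzag(tree) == [[1], [3, 2], [4, 5]]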
| 77 | """simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
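    # Cheap correctness check (illustration only): for the straight line
    # f(x) = 2x over [0, 3] the exact arc length is 3 * sqrt(5), and the
    # polyline approximation matches it to machine precision at any step count.
    approx = line_length(lambda x: 2 * x, 0, 3, steps=10)
    exact = 3 * math.sqrt(5)
    assert abs(approx - exact) < 1e-9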
| 77 | 1 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 367 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
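# Usage sketch (illustration only; downloads the standard OpenAI CLIP
# checkpoint on first run, so it is left commented out here):
#
#     from PIL import Image
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     image = Image.new("RGB", (224, 224))  # placeholder image
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
#     # inputs now holds input_ids, attention_mask and pixel_values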
| 48 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image


class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        image,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        strength=0.3,
        num_images_per_prompt=1,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
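if __name__ == "__main__":
    # Reading of the sizing helper above (illustration only): with the default
    # scale_factor of 8, each dimension is rounded up to a multiple of 64 and
    # then divided by 8.
    assert downscale_height_and_width(768, 768) == (96, 96)  # 768 / 64 = 12 -> 12 * 8
    assert downscale_height_and_width(700, 700) == (88, 88)  # ceil(700 / 64) = 11 -> 11 * 8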
| 246 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    # Fallback linear scan over array[left:right].
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
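    # Non-interactive sketch (illustration only): lists shorter than `precision`
    # fall through to the linear fallback, which makes the behaviour easy to check.
    demo = [1, 3, 5, 7, 9, 11, 13, 15]
    assert ite_ternary_search(demo, 7) == 3
    assert rec_ternary_search(0, len(demo) - 1, demo, 7) == 3
    assert ite_ternary_search(demo, 8) == -1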
| 246 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
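if __name__ == "__main__":
    # Orientation sketch (illustration only): hidden_size is derived, not passed
    # in; it is the channel dimension after the last stage.
    config = SwinConfig()  # tiny-variant defaults
    assert config.hidden_size == 96 * 2**3  # 768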
| 101 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
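    # Usage sketch (illustration data): a 4-vertex cycle with one expensive
    # edge; prim() returns MST edges as 1-based (child, parent) id pairs.
    graph = [Vertex(i) for i in range(4)]
    connect(graph, 1, 2, 1)
    connect(graph, 2, 3, 2)
    connect(graph, 3, 4, 1)
    connect(graph, 1, 4, 4)
    assert prim(graph, graph[0]) == [(2, 1), (3, 2), (4, 3)]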
| 101 | 1 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 220 |
"""simple docstring"""
def solution(n: int = 4_000_000) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
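# Equivalent formulation (illustration only): every third Fibonacci number is
# even, and the even terms satisfy E(k) = 4 * E(k-1) + E(k-2) with seeds 2, 8,
# which avoids generating and filtering the odd terms.
def solution_even_only(n: int = 4_000_000) -> int:
    total, e_prev, e = 0, 2, 8
    if n >= 2:
        total += 2
    while e <= n:
        total += e
        e_prev, e = e, 4 * e + e_prev
    return total


assert solution_even_only() == solution() == 4_613_732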
| 220 | 1 |
'''simple docstring'''
def solution(n: int = 1_000_000) -> int:
    # Project Euler 14: starting number below n with the longest Collatz chain.
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, n):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
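# Smoke test (illustration only): below 15 the longest Collatz chain starts at
# 9, whose sequence 9 -> 28 -> 14 -> ... -> 1 has 20 terms.
assert solution(15) == 9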
| 353 |
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 322 | 0 |
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
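# Standalone sketch (illustration only) of the feed_forward_proj convention
# parsed in __init__ above: "gated-<act>" marks a gated FFN, a bare activation
# name an ungated one.
def _parse_feed_forward_proj(feed_forward_proj):
    act_info = feed_forward_proj.split("-")
    return act_info[0] == "gated", act_info[-1]


assert _parse_feed_forward_proj("gated-gelu") == (True, "gelu")
assert _parse_feed_forward_proj("relu") == (False, "relu")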
| 12 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 | 1 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 365 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION ="\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_DESCRIPTION ="\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_KWARGS_DESCRIPTION ="\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 77 | 0 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1) # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    """simple docstring"""

    def __init__( self , cfg ):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad( self , images ):
        max_size = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(image_sizes , images )
        ]
        return torch.stack(images ), torch.tensor(image_sizes )
    def __call__( self , images , single_image=False ):
        with torch.no_grad():
            if not isinstance(images , list ):
                images = [images]
            if single_image:
                assert len(images ) == 1
            for i in range(len(images ) ):
                if isinstance(images[i] , torch.Tensor ):
                    images.insert(i , images.pop(i ).to(self.device ).float() )
                elif not isinstance(images[i] , torch.Tensor ):
                    images.insert(
                        i , torch.as_tensor(img_tensorize(images.pop(i ) , input_format=self.input_format ) )
                        .to(self.device )
                        .float() , )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images] )
            images = self.aug(images )
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x ) for x in images]
            # now pad them to do the following operations
            images , sizes = self.pad(images )
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes , sizes )
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box( boxes , scale_yx ):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box( tensor , box_size: Tuple[int, int] ):
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h , w = box_size
    tensor[:, 0].clamp_(min=0 , max=w )
    tensor[:, 1].clamp_(min=0 , max=h )
    tensor[:, 2].clamp_(min=0 , max=w )
    tensor[:, 3].clamp_(min=0 , max=h )
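# A minimal end-to-end sketch (editorial addition, not in the original module):
# driving Preprocess with a dummy Detectron2-style config. The field names
# mirror those read in __init__ above; the concrete values are illustrative
# assumptions, and inputs are HWC image tensors as the pipeline expects.
from types import SimpleNamespace

def _demo_preprocess():
    cfg = SimpleNamespace(
        INPUT=SimpleNamespace(MIN_SIZE_TEST=800, MAX_SIZE_TEST=1333, FORMAT="RGB"),
        MODEL=SimpleNamespace(DEVICE="cpu", PIXEL_STD=[1.0, 1.0, 1.0], PIXEL_MEAN=[103.0, 116.0, 123.0]),
        SIZE_DIVISIBILITY=0,
        PAD_VALUE=0,
    )
    preprocess = Preprocess(cfg)
    dummy = torch.rand(480, 640, 3)  # a single HWC float image
    images, sizes, scales_yx = preprocess(dummy, single_image=True)
    # images is CHW resized to shortest edge 800, e.g. torch.Size([3, 800, 1067])
    return images.shape, sizes, scales_yx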
| 208 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read( bpayload: bytes ,sampling_rate: int ) -> np.array:
    ar = F'{sampling_rate}'
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    try:
        with subprocess.Popen(ffmpeg_command ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes ,np.float32 )
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile' )
    return audio
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = "f32le" ,) -> Dict:
__lowerCamelCase : Optional[Any] = F'{sampling_rate}'
__lowerCamelCase : Optional[int] = '1'
if format_for_conversion == "s16le":
__lowerCamelCase : List[Any] = 2
elif format_for_conversion == "f32le":
__lowerCamelCase : Tuple = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
__lowerCamelCase : Any = platform.system()
if system == "Linux":
__lowerCamelCase : Tuple = 'alsa'
__lowerCamelCase : Optional[Any] = 'default'
elif system == "Darwin":
__lowerCamelCase : Union[str, Any] = 'avfoundation'
__lowerCamelCase : Tuple = ':0'
elif system == "Windows":
__lowerCamelCase : List[str] = 'dshow'
__lowerCamelCase : Optional[Any] = 'default'
__lowerCamelCase : Optional[int] = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
__lowerCamelCase : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__lowerCamelCase : int = _ffmpeg_stream(_lowerCAmelCase ,_lowerCAmelCase )
for item in iterator:
yield item
def ffmpeg_microphone_live( sampling_rate: int ,chunk_length_s: float ,stream_chunk_s: Optional[int] = None ,stride_length_s: Optional[Union[Tuple[float, float], float]] = None ,format_for_conversion: str = 'f32le' ,):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate ,chunk_s ,format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s ,(int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone ,chunk_len ,stride=(stride_left, stride_right) ,stream=True ):
        # Put everything back in numpy scale
        item['raw'] = np.frombuffer(item['raw'] ,dtype=dtype )
        item['stride'] = (
            item['stride'][0] // size_of_sample,
            item['stride'][1] // size_of_sample,
        )
        item['sampling_rate'] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter( iterator ,chunk_len: int ,stride: Tuple[int, int] ,stream: bool = False ):
    acc = b''
    stride_left , stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            F'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
def _ffmpeg_stream( ffmpeg_command ,buflen: int ):
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command ,stdout=subprocess.PIPE ,bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
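# A quick self-contained check of the chunker (editorial illustration, not part
# of the original module): split a fake 30-byte stream into 10-byte windows
# with a (2, 2) stride.
def _demo_chunking():
    fake_stream = iter([b'0123456789' for _ in range(3)])
    for chunk in chunk_bytes_iter(fake_stream, chunk_len=10, stride=(2, 2)):
        print(len(chunk['raw']), chunk['stride'])
    # prints four 10-byte chunks, then a final 6-byte tail with stride (2, 0)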
| 208 | 1 |
"""simple docstring"""
import math
import sys
def read_file_binary( file_path: str ) ->str:
    result = """"""
    try:
        with open(file_path, """rb""" ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print("""File not accessible""" )
        sys.exit()
def decompress_data( data_bits: str ) ->str:
    lexicon = {"""0""": """0""", """1""": """1"""}
    result , curr_string = """""", """"""
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + """0"""
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex["""0""" + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + """1"""
        index += 1
        curr_string = """"""
    return result
def write_file_binary( file_path: str, to_write: str ) ->None:
    byte_length = 8
    try:
        with open(file_path, """wb""" ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write ), byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append("""10000000""" )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2 ).to_bytes(1, byteorder="""big""" ) )
    except OSError:
        print("""File not accessible""" )
        sys.exit()
def remove_prefix( data_bits: str ) ->str:
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress( source_path: str, destination_path: str ) ->None:
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path, decompressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
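# A tiny illustration (editorial, not in the original) of the bit-string format
# `read_file_binary` produces: every byte becomes its zero-padded 8-bit string.
#     data = b"AB"                                # 0x41, 0x42
#     "".join(f"{byte:08b}" for byte in data)     # -> "0100000101000010"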
| 296 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''xlnet-base-cased''': None,
    '''xlnet-large-cased''': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model )
    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        '''simple docstring'''
        if self.remove_space:
            outputs = """ """.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("""NFKD""" , outputs )
            outputs = """""".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text: str ):
        '''simple docstring'''
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , """""" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def _decode( self , token_ids: List[int] , skip_special_tokens: bool = False , clean_up_tokenization_spaces: bool = None , spaces_between_special_tokens: bool = True , **kwargs , ):
        '''simple docstring'''
        self._decode_use_source_tokenizer = kwargs.pop("""use_source_tokenizer""" , False )
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = """""".join(sub_texts )
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
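# Illustrative usage (editorial note; requires network access and the
# `sentencepiece` package, and the token output is an example, not asserted):
#     tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     tokenizer.tokenize("Hello world!")  # e.g. ['▁Hello', '▁world', '!']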
| 296 | 1 |
def actual_power( a , b ):
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power( a , b ) -> float:
    if b < 0:
        return 1 / actual_power(a , b )
    return actual_power(a , b )
if __name__ == "__main__":
print(power(-2, -3))
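# Editorial aside: because each call above recomputes actual_power(a, int(b / 2))
# twice, the recursion performs O(b) multiplications rather than O(log b). A
# sketch of the usual fix, binding the half-power once (illustrative, not part
# of the original file):
def fast_power(a: int, b: int) -> int:
    # assumes b >= 0; reuse the half result instead of recomputing it
    if b == 0:
        return 1
    half = fast_power(a, b // 2)
    return half * half if b % 2 == 0 else a * half * half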
| 21 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests( PipelineTesterMixin, unittest.TestCase ):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
    batch_params = [
        """prompt""",
        """negative_prompt""",
        """image_embeds""",
        """negative_image_embeds""",
        """image""",
    ]
    required_optional_params = [
        """generator""",
        """height""",
        """width""",
        """strength""",
        """guidance_scale""",
        """negative_prompt""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        """simple docstring"""
        return 32
    @property
    def time_input_dim( self ):
        """simple docstring"""
        return 32
    @property
    def block_out_channels_0( self ):
        """simple docstring"""
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        """simple docstring"""
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        """simple docstring"""
        return 1_00
    @property
    def dummy_tokenizer( self ):
        """simple docstring"""
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
        return tokenizer
    @property
    def dummy_text_encoder( self ):
        """simple docstring"""
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=10_05, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet( self ):
        """simple docstring"""
        torch.manual_seed(0)
        model_kwargs = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs( self ):
        """simple docstring"""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq( self ):
        """simple docstring"""
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components( self ):
        """simple docstring"""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            'num_train_timesteps': 10_00,
            'beta_schedule': 'linear',
            'beta_start': 0.0_0_0_8_5,
            'beta_end': 0.0_1_2,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs)
        components = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs( self, device, seed=0):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((2_56, 2_56))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_img2img( self ):
        """simple docstring"""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img( self ):
        """simple docstring"""
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_img2img_frog.npy')
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
        prompt = 'A red cartoon frog, 4k'
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        image_emb , zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='', ).to_tuple()
        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=1_00, height=7_68, width=7_68, strength=0.2, output_type='np', )
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image, expected_image)
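# The fast test above compares a 3x3 corner slice against hard-coded values; a
# standalone version of that assertion pattern (illustrative helper, not part
# of the original test file):
def assert_slice_close(image, expected, atol=1e-2):
    # compare the bottom-right 3x3 patch of the last channel, as the test does
    actual = image[0, -3:, -3:, -1].flatten()
    assert np.abs(actual - expected).max() < atol, f"expected {expected}, got {actual}"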
| 21 | 1 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase ):
def __a ( self ) -> List[str]:
lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def __a ( self ) -> int:
with self.assertRaises(_a ):
lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def __a ( self ) -> Optional[int]:
with self.assertRaises(_a ):
lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def __a ( self ) -> Any:
lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __a ( self ) -> Optional[int]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowerCAmelCase_ = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def __a ( self ) -> Dict:
lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __a ( self ) -> int:
lowerCAmelCase_ = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def __a ( self ) -> Tuple:
lowerCAmelCase_ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def __a ( self ) -> Optional[Any]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowerCAmelCase_ = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def __a ( self ) -> int:
import PIL.Image
lowerCAmelCase_ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=_a ) as mock_cast_to_python_objects:
lowerCAmelCase_ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image] , type=Image() ) )
lowerCAmelCase_ , lowerCAmelCase_ = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , _a )
self.assertFalse(kwargs["optimize_list_casting"] )
def _check_output(output , expected_num_chunks: int ):
    stream = pa.BufferReader(output ) if isinstance(output , pa.Buffer ) else pa.memory_map(output )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    assert len(pa_table.to_batches() ) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def A(__a: int , __a: int ):
lowerCAmelCase_ = pa.BufferOutputStream()
lowerCAmelCase_ = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def A():
lowerCAmelCase_ = pa.BufferOutputStream()
lowerCAmelCase_ = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=__a , features=__a ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
lowerCAmelCase_ = pa.BufferReader(output.getvalue() )
lowerCAmelCase_ = pa.ipc.open_stream(__a )
lowerCAmelCase_ = f.read_all()
lowerCAmelCase_ = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__a )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def A(__a: int ):
lowerCAmelCase_ = pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt="split_name" , check_duplicates=__a , ) as writer:
with pytest.raises(__a ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def A(__a: Any ):
lowerCAmelCase_ = pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt="split_name" , check_duplicates=__a , ) as writer:
with pytest.raises(__a ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def A(__a: int ):
lowerCAmelCase_ = pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt="split_name" , check_duplicates=__a , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def A(__a: List[str] , __a: int ):
lowerCAmelCase_ = pa.BufferOutputStream()
lowerCAmelCase_ = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def A(__a: Union[str, Any] , __a: List[Any] ):
lowerCAmelCase_ = pa.BufferOutputStream()
lowerCAmelCase_ = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def A(__a: Dict , __a: Dict ):
lowerCAmelCase_ = pa.BufferOutputStream()
lowerCAmelCase_ = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def A():
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
lowerCAmelCase_ = os.path.join(__a , "test.arrow" )
with ArrowWriter(path=__a , schema=pa.schema(__a ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(__a , 1 )
def get_base_dtype(arr_type ):
    if pa.types.is_list(arr_type ):
        return get_base_dtype(arr_type.value_type )
    else:
        return arr_type
def change_first_primitive_element_in_list(lst , value ):
    if isinstance(lst[0] , list ):
        change_first_primitive_element_in_list(lst[0] , value )
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def A(__a: Any , __a: str , __a: int ):
lowerCAmelCase_ = pa.array(TypedSequence(__a , optimized_int_type=__a ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def A(__a: Tuple , __a: str , __a: Optional[int] ):
# in range
lowerCAmelCase_ = pa.array(OptimizedTypedSequence(__a , col=__a ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
lowerCAmelCase_ = copy.deepcopy(__a )
lowerCAmelCase_ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__a , __a )
lowerCAmelCase_ = pa.array(OptimizedTypedSequence(__a , col=__a ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def A(__a: Dict , __a: List[str] ):
lowerCAmelCase_ = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=__a ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def A(__a: Union[str, Any] ):
lowerCAmelCase_ = "mock://dataset-train.arrow"
with ArrowWriter(path=__a , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__a ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__a )
def A():
lowerCAmelCase_ = pa.BufferOutputStream()
with ParquetWriter(stream=__a ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
lowerCAmelCase_ = pa.BufferReader(output.getvalue() )
lowerCAmelCase_ = pq.read_table(__a )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def A(__a: Tuple , __a: Dict ):
import PIL.Image
lowerCAmelCase_ = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__a , format="png" )
lowerCAmelCase_ = pa.BufferOutputStream()
with ParquetWriter(
stream=__a , features=Features({"image": Image()} ) , embed_local_files=__a ) as writer:
writer.write({"image": image_path} )
writer.finalize()
lowerCAmelCase_ = pa.BufferReader(output.getvalue() )
lowerCAmelCase_ = pq.read_table(__a )
lowerCAmelCase_ = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , __a )
with open(__a , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def A():
lowerCAmelCase_ = pa.schema([pa.field("col_1" , pa.string() , nullable=__a )] )
lowerCAmelCase_ = pa.BufferOutputStream()
with ArrowWriter(stream=__a ) as writer:
writer._build_writer(inferred_schema=__a )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
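# A compact standalone illustration of the TypedSequence semantics the tests
# above exercise (editorial addition): `try_type` is a soft preference that
# falls back to inference, while `type` is a hard requirement.
def _demo_typed_sequence():
    arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
    assert arr.type == pa.string()  # incompatible try_type falls back to inference
    arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
    assert arr.type == pa.int32()  # compatible hard type is enforced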
| 22 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint , config ):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
    down_blocks = {
        layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(num_down_blocks )
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
    up_blocks = {
        layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(num_up_blocks )
    }
    for i in range(num_down_blocks ):
        resnets = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key]
        if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[F"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                F"encoder.down.{i}.downsample.conv.weight" )
            new_checkpoint[F"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                F"encoder.down.{i}.downsample.conv.bias" )
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    conv_attn_to_linear(new_checkpoint )
    for i in range(num_up_blocks ):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key
        ]
        if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[F"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                F"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[F"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                F"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    conv_attn_to_linear(new_checkpoint )
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str , output_path: str , ):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors" ):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path , framework="pt" , device="cpu" ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path , map_location=device )["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config , image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint , vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
lowerCamelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
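# Example invocation (editorial note; paths are placeholders and the script
# name is whatever this file is saved as):
#     python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers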
| 22 | 1 |
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}
PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer( PreTrainedTokenizer ):
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
def __init__( self : Union[str, Any] , snake_case_ : str , snake_case_ : List[Any]=None , snake_case_ : Optional[Any]=False , snake_case_ : Any="utf8" , snake_case_ : Optional[Any]="[UNK]" , snake_case_ : Union[str, Any]="[SEP]" , snake_case_ : int="[PAD]" , snake_case_ : Tuple="[CLS]" , snake_case_ : Optional[Any]="[MASK]" , snake_case_ : Optional[Dict[str, Any]] = None , **snake_case_ : str , ) -> None:
'''simple docstring'''
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , vocab_file=snake_case_ , encoding=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
A__ = do_lower_case
A__ = sentencepiece_model_ckpt
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
A__ = self.load_vocab(filepath=snake_case_ )
else:
A__ = {self.sp_model.id_to_piece(snake_case_ ): id for id in range(self.sp_model.get_piece_size() )}
A__ = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping( self , text ):
        '''simple docstring'''
        if text is None:
            return None
        split_tokens = self.tokenize(text )
        normalized_text , char_mapping = "", []
        for i, ch in enumerate(text ):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch )
            else:
                ch = unicodedata.normalize("NFKC" , ch )
            if self.is_whitespace(ch ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch ) )
        text , token_mapping , offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token ) + offset
            end = start + len(token )
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            offset = end
        return token_mapping
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.vocab )
    def get_vocab( self ):
        '''simple docstring'''
        return dict(self.vocab , **self.added_tokens_encoder )
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.sentencepiece_model_ckpt )
    def clean_text( self , text ):
        '''simple docstring'''
        return "".join((self.SP_CHAR_MAPPING.get(c , c ) for c in text) )
def __magic_name__ ( self : Dict , snake_case_ : Tuple , snake_case_ : Union[str, Any]=False , snake_case_ : Optional[Any]=64 , snake_case_ : int=0.1 ) -> Dict:
'''simple docstring'''
if self.sp_model_kwargs.get("enable_sampling" ) is True:
A__ = True
if self.sp_model_kwargs.get("alpha" ) is not None:
A__ = self.sp_model_kwargs.get("alpha" )
if self.sp_model_kwargs.get("nbest_size" ) is not None:
A__ = self.sp_model_kwargs.get("nbest_size" )
if not enable_sampling:
A__ = self.sp_model.EncodeAsPieces(snake_case_ )
else:
A__ = self.sp_model.SampleEncodeAsPieces(snake_case_ , snake_case_ , snake_case_ )
A__ = []
for pi, piece in enumerate(snake_case_ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(snake_case_ ) and pi != 0:
new_pieces.append(snake_case_ )
continue
else:
continue
A__ = 0
for i, chunk in enumerate(snake_case_ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(snake_case_ ) or self.is_punct(snake_case_ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(snake_case_ )
A__ = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
A__ = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
A__ = i
if len(snake_case_ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
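    # Editorial note on the sampling branch above: when `sp_model_kwargs` turns
    # on `enable_sampling`, SentencePiece draws one of the `nbest_size` best
    # segmentations with smoothing parameter `alpha` (subword regularization):
    #     sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)  # stochastic
    #     sp_model.EncodeAsPieces(text)                           # deterministic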
def __magic_name__ ( self : List[str] , snake_case_ : Optional[int] ) -> str:
'''simple docstring'''
A__ = "".join(snake_case_ ).replace(snake_case_ , " " ).strip()
return out_string
def __magic_name__ ( self : List[str] , snake_case_ : Optional[int] ) -> Dict:
'''simple docstring'''
A__ = self.convert_ids_to_tokens(snake_case_ )
A__ = "".join(snake_case_ ).replace(snake_case_ , " " ).strip()
return out_string
def __magic_name__ ( self : List[str] , snake_case_ : Any ) -> Dict:
'''simple docstring'''
return self.vocab.get(snake_case_ , self.vocab.get(self.unk_token ) )
def __magic_name__ ( self : Union[str, Any] , snake_case_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.reverse_vocab.get(snake_case_ , self.unk_token )
def __magic_name__ ( self : int , snake_case_ : Any , snake_case_ : List[Any]=None ) -> Dict:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A__ = [self.cls_token_id]
A__ = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def __magic_name__ ( self : Optional[Any] , snake_case_ : int , snake_case_ : int=None ) -> str:
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def __magic_name__ ( self : Tuple , snake_case_ : List[Any] , snake_case_ : Union[str, Any]=None , snake_case_ : Optional[Any]=False ) -> Optional[Any]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1]
def __magic_name__ ( self : str , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(snake_case_ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(snake_case_ ) + 1) + [1] * (len(snake_case_ ) + 3)
def __magic_name__ ( self : List[str] , snake_case_ : Tuple ) -> Optional[int]:
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def __magic_name__ ( self : Optional[int] , snake_case_ : str ) -> Optional[Any]:
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def __magic_name__ ( self : Optional[int] , snake_case_ : Optional[int] ) -> Tuple:
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def __magic_name__ ( self : Optional[Any] , snake_case_ : List[Any] ) -> List[str]:
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(snake_case_ ) == 1:
A__ = unicodedata.category(snake_case_ )
if cat == "Zs":
return True
return False
def __magic_name__ ( self : Tuple , snake_case_ : Optional[Any] ) -> int:
'''simple docstring'''
A__ = {}
with io.open(snake_case_ , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(snake_case_ ):
A__ = line.rstrip("\n" )
A__ = int(snake_case_ )
return token_to_idx
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary and the sentencepiece model to `save_directory`."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
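# Illustrative sketch (added; not part of the original file): how the helpers
# above compose for a sequence pair. `tok`, `ids_a`, `ids_b` are hypothetical.
#
#   ids_a, ids_b = [11, 12], [21, 22]
#   tok.build_inputs_with_special_tokens(ids_a, ids_b)
#       -> [cls, 11, 12, sep, sep, 21, 22, sep]
#   tok.create_token_type_ids_from_sequences(ids_a, ids_b)
#       -> [0, 0, 0, 1, 1, 1, 1, 1]
#   tok.get_special_tokens_mask(ids_a, ids_b)
#       -> [1, 0, 0, 1, 1, 0, 0, 1]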
| 247 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        # The two boolean flags below are reconstructed; the obfuscated source hid them.
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    # The six boolean test-flag names below are reconstructed; the obfuscated source hid them.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def __magic_name__ ( self : Any ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def __magic_name__ ( self : Dict ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __magic_name__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __magic_name__ ( self : Tuple ) -> Dict:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __magic_name__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __magic_name__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip(reason="UperNet does not have tied weights" )
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
@slow
def __magic_name__ ( self : Any ) -> str:
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = UperNetForSemanticSegmentation.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _SCREAMING_SNAKE_CASE ( ) -> int:
A__ = hf_hub_download(
repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
A__ = Image.open(lowercase_ ).convert("RGB" )
return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 247 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    # Normalize the accepted video inputs into a batch of frame lists.
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class VideoMAEImageProcessor(BaseImageProcessor):
    # Class name reconstructed (the obfuscated source hid it); the signature matches
    # the VideoMAE-style video image processor.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
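# A minimal usage sketch (added; not in the original file), assuming the
# processor class name reconstructed above. Random frames stand in for a clip.
#
#   import numpy as np
#   processor = VideoMAEImageProcessor()
#   video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#   batch = processor(video, return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 8, 3, 224, 224)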
| 357 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow")))
            # The original checked shard 00000 twice; the second check should cover shard 00001.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
| 166 | 0 |
from math import factorial
def combinations(n: int, k: int) -> int:
    """Compute n choose k, the number of ways to pick k items from n."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
F'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"If a class of 40 students must be arranged into groups of",
F'''4 for group projects, there are {combinations(40, 4)} ways''',
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
F'''are {combinations(10, 3)} ways that first, second and''',
"third place can be awarded.",
)
| 222 |
"""simple docstring"""
import operator as op
lowerCAmelCase : Dict = """scaler.pt"""
lowerCAmelCase : Tuple = """pytorch_model"""
lowerCAmelCase : Union[str, Any] = """random_states"""
lowerCAmelCase : Union[str, Any] = """optimizer"""
lowerCAmelCase : Dict = """scheduler"""
lowerCAmelCase : int = """pytorch_model.bin"""
lowerCAmelCase : str = """pytorch_model.bin.index.json"""
lowerCAmelCase : Union[str, Any] = """model.safetensors"""
lowerCAmelCase : List[Any] = """model.safetensors.index.json"""
lowerCAmelCase : List[Any] = """1.10.2"""
lowerCAmelCase : Any = """py38"""
lowerCAmelCase : Optional[int] = """4.17.0"""
lowerCAmelCase : str = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
lowerCAmelCase : Tuple = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
lowerCAmelCase : List[Any] = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
lowerCAmelCase : List[str] = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
lowerCAmelCase : List[str] = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
lowerCAmelCase : Any = """2.0.1"""
lowerCAmelCase : List[Any] = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
lowerCAmelCase : Union[str, Any] = ["""default""", """reduce-overhead""", """max-autotune"""]
lowerCAmelCase : Optional[int] = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
lowerCAmelCase : Union[str, Any] = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
lowerCAmelCase : List[str] = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
lowerCAmelCase : Optional[Any] = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 291 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : List[str] = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
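# A minimal usage sketch (added; not part of the original file):
#
#   config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], out_features=["stage2", "stage4"])
#   config.hidden_size   # -> 512, i.e. 64 * 2 ** 3
#   config.out_features  # -> ["stage2", "stage4"]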
| 168 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2_646, 10_269, 83, 99_942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }
        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0_681_762, 0.10_894_451, 0.06_772_504],
                    [-0.06_423_668, 0.02_366_615, 0.04_329_344],
                    [-0.06_057_295, 0.09_974_135, -0.00_070_584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 168 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel values
        pixel_values = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            pixel_values = self.get_overflowing_images(pixel_values, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = pixel_values
        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
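# A minimal usage sketch (added; not in the original file). The document image
# path is a placeholder.
#
#   from transformers import AutoProcessor
#   from PIL import Image
#   processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=True)
#   image = Image.open("invoice.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")
#   sorted(encoding.keys())  # -> ['attention_mask', 'bbox', 'input_ids', 'pixel_values']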
| 263 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False


@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # Attend-and-excite runs a backward pass at inference time, for which there
    # is no deterministic operator, so determinism is disabled for these tests.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,  # boolean reconstructed; hidden by obfuscation
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16)
        pipe.to("cuda")
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy")
        assert np.abs((expected_image - image).max()) < 5e-1
| 263 | 1 |
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 368 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowercase =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
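# A minimal usage sketch (added; not part of the original file). The checkpoint
# is a typical CLIP model; any zero-shot image-classification model works.
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog", "car"])
#   # -> [{"score": 0.98, "label": "cat"}, ...]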
| 242 | 0 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 59 | """simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group")
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
            if not is_used:
                unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 69 | 0 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the tokenizers lib, so these are no-ops
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 180 |
import math
import sys
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> int:
if number != int(__UpperCamelCase):
raise ValueError("the value of input must be a natural number")
if number < 0:
raise ValueError("the value of input must not be a negative number")
if number == 0:
return 1
a = [-1] * (number + 1)
a = 0
for i in range(1 , number + 1):
a = sys.maxsize
a = int(math.sqrt(__UpperCamelCase))
for j in range(1 , root + 1):
a = 1 + answers[i - (j**2)]
a = min(__UpperCamelCase , __UpperCamelCase)
a = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
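# Illustration: the same bottom-up dynamic programme with descriptive names
# (names below are illustrative, not from the original snippet); answers[i]
# holds the minimum number of perfect squares summing to i.
import math

def _min_perfect_squares(number: int) -> int:
    answers = [0] * (number + 1)
    for i in range(1, number + 1):
        best = i  # worst case: i copies of 1**2
        for j in range(1, int(math.sqrt(i)) + 1):
            best = min(best, 1 + answers[i - j * j])
        answers[i] = best
    return answers[number]

assert _min_perfect_squares(12) == 3  # 4 + 4 + 4
assert _min_perfect_squares(13) == 2  # 4 + 9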
| 180 | 1 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str]=[] ):
__a : Any = size[0] - overlap_pixels * 2
__a : List[Any] = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
__a : Tuple = np.ones((size_y, size_x) , dtype=np.uinta ) * 255
__a : Tuple = np.pad(_SCREAMING_SNAKE_CASE , mode='linear_ramp' , pad_width=_SCREAMING_SNAKE_CASE , end_values=0 )
if "l" in remove_borders:
__a : str = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
__a : int = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
__a : Optional[Any] = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
__a : List[str] = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
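# Illustration: what mode="linear_ramp" padding (used above) produces on a
# 1-D strip -- values fade linearly from 0 at the border to 255 in the core,
# which is what lets overlapping tiles be alpha-blended smoothly:
import numpy as np

_ramped = np.pad(np.full(4, 255, dtype=np.uint8), pad_width=3, mode="linear_ramp", end_values=0)
assert _ramped.tolist() == [0, 85, 170, 255, 255, 255, 255, 170, 85, 0]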
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Any ):
return max(_SCREAMING_SNAKE_CASE , min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : [int] , _SCREAMING_SNAKE_CASE : [int] , _SCREAMING_SNAKE_CASE : [int] ):
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : [int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : [int] ):
__a : int = list(_SCREAMING_SNAKE_CASE )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
__a : str = clamp_rect(_SCREAMING_SNAKE_CASE , [0, 0] , [image_size[0], image_size[1]] )
return rect
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any ):
__a : Dict = Image.new('RGB' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(_SCREAMING_SNAKE_CASE , (original_slice, 0) )
return result
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Any ):
__a : Tuple = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
__a : Any = tile.crop(_SCREAMING_SNAKE_CASE )
return tile
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
__a : int = n % d
return n - divisor
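# Illustration: the helper above rounds n down to the nearest multiple of d
# (equivalent arithmetic shown with illustrative names):
def _round_down_to_multiple(n: int, d: int) -> int:
    return n - (n % d)

assert _round_down_to_multiple(773, 64) == 768
assert _round_down_to_multiple(768, 64) == 768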
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a , __a , __a , __a , __a , __a = 350 , ):
'''simple docstring'''
super().__init__(
vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , low_res_scheduler=__a , scheduler=__a , max_noise_level=__a , )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , **__a ):
'''simple docstring'''
torch.manual_seed(0 )
__a : Tuple = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__a : List[Any] = add_overlap_rect(__a , __a , image.size )
__a : Tuple = image.crop(__a )
__a : Union[str, Any] = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__a : Tuple = translated_slice_x - (original_image_slice / 2)
__a : Optional[Any] = max(0 , __a )
__a : List[Any] = squeeze_tile(__a , __a , __a , __a )
__a : int = to_input.size
__a : Union[str, Any] = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__a : str = super(__a , self ).__call__(image=__a , **__a ).images[0]
__a : List[Any] = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__a : Tuple = unsqueeze_tile(__a , __a )
__a : Optional[Any] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__a : Dict = []
if x == 0:
remove_borders.append('l' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('r' )
if y == 0:
remove_borders.append('t' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('b' )
__a : Dict = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__a ) , mode='L' , )
final_image.paste(
__a , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __a )
@torch.no_grad()
def __call__( self , __a , __a , __a = 75 , __a = 9.0 , __a = 50 , __a = None , __a = 1 , __a = 0.0 , __a = None , __a = None , __a = None , __a = 1 , __a = 128 , __a = 32 , __a = 32 , ):
'''simple docstring'''
__a : Tuple = Image.new('RGB' , (image.size[0] * 4, image.size[1] * 4) )
__a : Dict = math.ceil(image.size[0] / tile_size )
__a : Optional[int] = math.ceil(image.size[1] / tile_size )
__a : int = tcx * tcy
__a : List[str] = 0
for y in range(__a ):
for x in range(__a ):
self._process_tile(
__a , __a , __a , __a , __a , __a , __a , prompt=__a , num_inference_steps=__a , guidance_scale=__a , noise_level=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , )
current_count += 1
if callback is not None:
callback({'progress': current_count / total_tile_count, 'image': final_image} )
return final_image
def lowerCamelCase ():
# Run a demo
__a : Any = 'stabilityai/stable-diffusion-x4-upscaler'
__a : int = StableDiffusionTiledUpscalePipeline.from_pretrained(_SCREAMING_SNAKE_CASE , revision='fp16' , torch_dtype=torch.floataa )
__a : Union[str, Any] = pipe.to('cuda' )
__a : Union[str, Any] = Image.open('../../docs/source/imgs/diffusers_library.jpg' )
def callback(_SCREAMING_SNAKE_CASE : str ):
print(F"""progress: {obj["progress"]:.4f}""" )
obj["image"].save('diffusers_library_progress.jpg' )
__a : Dict = pipe(image=_SCREAMING_SNAKE_CASE , prompt='Black font, white background, vector' , noise_level=40 , callback=_SCREAMING_SNAKE_CASE )
final_image.save('diffusers_library.jpg' )
if __name__ == "__main__":
main()
| 27 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
__lowercase : Tuple = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowercase : List[str] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowercase : Any = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
def __UpperCAmelCase ( self , __a , __a , __a=None , __a=False , __a=False , __a=False , ):
'''simple docstring'''
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
__a : Tuple = np.array([re.sub(__a , '' , __a ) for x in predictions] )
__a : List[Any] = np.array([re.sub(__a , '' , __a ) for x in references] )
else:
__a : int = np.asarray(__a )
__a : str = np.asarray(__a )
if ignore_case:
__a : Dict = np.char.lower(__a )
__a : List[str] = np.char.lower(__a )
if ignore_punctuation:
__a : Dict = string.punctuation.maketrans('' , '' , string.punctuation )
__a : Tuple = np.char.translate(__a , table=__a )
__a : Dict = np.char.translate(__a , table=__a )
if ignore_numbers:
__a : Optional[int] = string.digits.maketrans('' , '' , string.digits )
__a : Tuple = np.char.translate(__a , table=__a )
__a : Optional[int] = np.char.translate(__a , table=__a )
__a : Any = predictions == references
return {"exact_match": np.mean(__a ) * 100}
| 27 | 1 |
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str , **snake_case_ : List[str] ) -> str:
"""simple docstring"""
_lowerCAmelCase = AutoConfig.from_pretrained(a__ , **a__ )
_lowerCAmelCase = AutoModelForSeqaSeqLM.from_config(a__ )
model.save_pretrained(a__ )
AutoTokenizer.from_pretrained(a__ ).save_pretrained(a__ )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version) | 371 |
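# Illustration (comment only): fire.Fire exposes the function above as a CLI
# whose positional arguments map onto the function parameters, e.g. (script
# name and paths below are placeholders):
#   python save_randomly_initialized.py t5-small ./t5-small-random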
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> Dict:
"""simple docstring"""
return getitem, k
def __UpperCAmelCase ( snake_case_ : Dict , snake_case_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return setitem, k, v
def __UpperCAmelCase ( snake_case_ : str ) -> Optional[int]:
"""simple docstring"""
return delitem, k
def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : Tuple , *snake_case_ : Tuple ) -> str:
"""simple docstring"""
try:
return fun(snake_case_ , *snake_case_ ), None
except Exception as e:
return None, e
SCREAMING_SNAKE_CASE : int = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
SCREAMING_SNAKE_CASE : List[Any] = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
SCREAMING_SNAKE_CASE : Any = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
SCREAMING_SNAKE_CASE : Union[str, Any] = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
SCREAMING_SNAKE_CASE : Optional[Any] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
SCREAMING_SNAKE_CASE : Optional[int] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def __UpperCAmelCase ( snake_case_ : List[Any] ) -> Tuple:
"""simple docstring"""
_lowerCAmelCase = HashMap(initial_block_size=4 )
_lowerCAmelCase = {}
for _, (fun, *args) in enumerate(snake_case_ ):
_lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ )
_lowerCAmelCase , _lowerCAmelCase = _run_operation(snake_case_ , snake_case_ , *snake_case_ )
assert my_res == py_res
assert str(snake_case_ ) == str(snake_case_ )
assert set(snake_case_ ) == set(snake_case_ )
assert len(snake_case_ ) == len(snake_case_ )
assert set(my.items() ) == set(py.items() )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
def is_public(snake_case_ : str ) -> bool:
return not name.startswith("""_""" )
_lowerCAmelCase = {name for name in dir({} ) if is_public(snake_case_ )}
_lowerCAmelCase = {name for name in dir(HashMap() ) if is_public(snake_case_ )}
assert dict_public_names > hash_public_names | 317 | 0 |
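# Illustration: the test above is a differential test -- the same operation
# script is replayed against the custom HashMap and a builtin dict, and every
# observable (result, repr, keys, items, length) must agree. A standalone
# sketch of the replay pattern (names are illustrative):
from operator import delitem, setitem

def _replay(ops, mapping):
    for fun, *args in ops:
        try:
            fun(mapping, *args)
        except Exception:
            pass  # absent-key errors are a deliberate part of the script
    return mapping

_script = [(setitem, "k", 1), (delitem, "k"), (setitem, "k", 2)]
assert _replay(_script, {}) == {"k": 2}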
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class _lowercase :
def __init__( self: List[str] , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any]=13 , UpperCamelCase__: List[str]=7 , UpperCamelCase__: Optional[Any]=False , UpperCamelCase__: Dict=True , UpperCamelCase__: Tuple=False , UpperCamelCase__: Optional[Any]=False , UpperCamelCase__: List[Any]=19 , UpperCamelCase__: str=32 , UpperCamelCase__: Tuple=5 , UpperCamelCase__: Any=4 , UpperCamelCase__: Dict=37 , UpperCamelCase__: Dict="gelu" , UpperCamelCase__: Any=0.1 , UpperCamelCase__: str=0.1 , UpperCamelCase__: Optional[int]=512 , UpperCamelCase__: str=16 , UpperCamelCase__: List[str]=2 , UpperCamelCase__: List[Any]=0.02 , UpperCamelCase__: str=3 , UpperCamelCase__: List[str]=4 , UpperCamelCase__: Union[str, Any]=None , ):
lowerCamelCase__ : Any = parent
lowerCamelCase__ : str = batch_size
lowerCamelCase__ : Optional[Any] = seq_length
lowerCamelCase__ : List[Any] = is_training
lowerCamelCase__ : Optional[int] = use_input_mask
lowerCamelCase__ : List[str] = use_token_type_ids
lowerCamelCase__ : List[Any] = use_labels
lowerCamelCase__ : List[str] = vocab_size
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : Optional[int] = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Optional[Any] = hidden_dropout_prob
lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase__ : Tuple = max_position_embeddings
lowerCamelCase__ : int = type_vocab_size
lowerCamelCase__ : Dict = type_sequence_label_size
lowerCamelCase__ : int = initializer_range
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : List[Any] = num_choices
lowerCamelCase__ : Any = scope
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : int = None
if self.use_input_mask:
lowerCamelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : List[Any] = None
if self.use_labels:
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : Any = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : Any = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Optional[Any] = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=UpperCamelCase__ , esmfold_config={"""trunk""": {"""num_blocks""": 2}, """fp16_esm""": False} , )
return config
def lowerCamelCase_ ( self: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: int , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: str , UpperCamelCase__: Any ):
lowerCamelCase__ : Dict = EsmForProteinFolding(config=UpperCamelCase__ ).float()
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
lowerCamelCase__ : Dict = model(UpperCamelCase__ )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : int = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = config_and_inputs
lowerCamelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = False
a = (EsmForProteinFolding,) if is_torch_available() else ()
a = ()
a = {} if is_torch_available() else {}
a = False
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Optional[Any] = EsmFoldModelTester(self )
lowerCamelCase__ : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self: Tuple ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
@unittest.skip("""Does not support attention outputs""" )
def lowerCamelCase_ ( self: List[Any] ):
pass
@unittest.skip
def lowerCamelCase_ ( self: int ):
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCamelCase_ ( self: Optional[Any] ):
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCamelCase_ ( self: str ):
pass
@unittest.skip("""ESMFold does not support passing input embeds!""" )
def lowerCamelCase_ ( self: List[str] ):
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase_ ( self: int ):
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase_ ( self: Optional[Any] ):
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase_ ( self: List[str] ):
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase_ ( self: Tuple ):
pass
@unittest.skip("""ESMFold does not output hidden states in the normal way.""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
@unittest.skip("""ESMfold does not output hidden states in the normal way.""" )
def lowerCamelCase_ ( self: Union[str, Any] ):
pass
@unittest.skip("""ESMFold only has one output format.""" )
def lowerCamelCase_ ( self: Dict ):
pass
@unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""" )
def lowerCamelCase_ ( self: Union[str, Any] ):
pass
@unittest.skip("""ESMFold does not support input chunking.""" )
def lowerCamelCase_ ( self: Any ):
pass
@unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def lowerCamelCase_ ( self: Any ):
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def lowerCamelCase_ ( self: str ):
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def lowerCamelCase_ ( self: List[Any] ):
pass
@unittest.skip("""ESMFold doesn't support data parallel.""" )
def lowerCamelCase_ ( self: Tuple ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase_ ( self: str ):
pass
@require_torch
class _lowercase ( _lowercase ):
@slow
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : List[Any] = EsmForProteinFolding.from_pretrained("""facebook/esmfold_v1""" ).float()
model.eval()
lowerCamelCase__ : Optional[int] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase__ : Any = model(UpperCamelCase__ )["""positions"""]
lowerCamelCase__ : Dict = torch.tensor([2.5_828, 0.7_993, -10.9_334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , UpperCamelCase__ , atol=1e-4 ) )
| 41 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple:
'''simple docstring'''
return {key.lstrip('-' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
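# Illustration: the dict comprehension above pairs up leftover CLI tokens --
# flags at even indices, values at odd indices -- and strips leading dashes:
_tokens = ["--num_proc", "8", "--cache_dir", "/tmp"]
assert {k.lstrip("-"): v for k, v in zip(_tokens[::2], _tokens[1::2])} == {
    "num_proc": "8",
    "cache_dir": "/tmp",
}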
def _snake_case( ) -> Dict:
'''simple docstring'''
A__ = ArgumentParser(
'HuggingFace Datasets CLI tool' , usage='datasets-cli <command> [<args>]' , allow_abbrev=SCREAMING_SNAKE_CASE__ )
A__ = parser.add_subparsers(help='datasets-cli command helpers' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
TestCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
RunBeamCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
DummyDataCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
# Parse args
A__ , A__ = parser.parse_known_args()
if not hasattr(SCREAMING_SNAKE_CASE__ , 'func' ):
parser.print_help()
exit(1 )
A__ = parse_unknown_args(SCREAMING_SNAKE_CASE__ )
# Run
A__ = args.func(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
service.run()
if __name__ == "__main__":
main()
| 7 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
UpperCAmelCase : str = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , __SCREAMING_SNAKE_CASE , )
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 354 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = analyze_text(a__ )
__SCREAMING_SNAKE_CASE = list(""" """ + ascii_lowercase )
# what is our total sum of probabilities.
__SCREAMING_SNAKE_CASE = sum(single_char_strings.values() )
# one length string
__SCREAMING_SNAKE_CASE = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
__SCREAMING_SNAKE_CASE = single_char_strings[ch]
__SCREAMING_SNAKE_CASE = my_str / all_sum
my_fir_sum += prob * math.loga(a__ ) # entropy formula.
# print entropy
print(F'{round(-1 * my_fir_sum ):.1f}' )
# two len string
__SCREAMING_SNAKE_CASE = sum(two_char_strings.values() )
__SCREAMING_SNAKE_CASE = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
__SCREAMING_SNAKE_CASE = cha + cha
if sequence in two_char_strings:
__SCREAMING_SNAKE_CASE = two_char_strings[sequence]
__SCREAMING_SNAKE_CASE = int(a__ ) / all_sum
my_sec_sum += prob * math.loga(a__ )
# print second entropy
print(F'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = Counter() # type: ignore
__SCREAMING_SNAKE_CASE = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(a__ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def a__ ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
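# Illustration: the first loop above estimates first-order Shannon entropy,
# H = -sum(p(c) * log2 p(c)) over single-character frequencies; a cleanly
# named standalone version (names are illustrative):
import math
from collections import Counter

def _char_entropy(text: str) -> float:
    counts = Counter(text)
    total = len(text)
    return -sum((n / total) * math.log2(n / total) for n in counts.values())

assert abs(_char_entropy("abab") - 1.0) < 1e-9  # two equiprobable symbols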
if __name__ == "__main__":
main()
| 331 | 0 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
UpperCAmelCase : int ="""pytorch_model.bin"""
UpperCAmelCase : List[str] ="""pytorch_model.bin.index.json"""
UpperCAmelCase : Dict ="""adapter_config.json"""
UpperCAmelCase : List[str] ="""adapter_model.bin"""
UpperCAmelCase : Tuple ="""adapter_model.safetensors"""
UpperCAmelCase : Optional[Any] ="""tf_model.h5"""
UpperCAmelCase : str ="""tf_model.h5.index.json"""
UpperCAmelCase : Optional[Any] ="""model.ckpt"""
UpperCAmelCase : int ="""flax_model.msgpack"""
UpperCAmelCase : Tuple ="""flax_model.msgpack.index.json"""
UpperCAmelCase : List[str] ="""model.safetensors"""
UpperCAmelCase : List[Any] ="""model.safetensors.index.json"""
UpperCAmelCase : Any ="""config.json"""
UpperCAmelCase : Optional[int] ="""preprocessor_config.json"""
UpperCAmelCase : Dict =FEATURE_EXTRACTOR_NAME
UpperCAmelCase : Any ="""generation_config.json"""
UpperCAmelCase : Dict ="""modelcard.json"""
UpperCAmelCase : List[Any] ="""▁"""
UpperCAmelCase : Optional[int] =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
UpperCAmelCase : Optional[Any] =[
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
UpperCAmelCase : List[Any] =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
UpperCAmelCase : List[Any] =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def _lowerCAmelCase (_lowerCAmelCase):
if version.parse(_lowerCAmelCase) < version.parse(_lowerCAmelCase):
if "dev" in min_version:
UpperCamelCase_ = (
"This example requires a source install from HuggingFace Transformers (see "
"`https://huggingface.co/docs/transformers/installation#install-from-source`),"
)
else:
UpperCamelCase_ = f"""This example requires a minimum version of {min_version},"""
error_message += f""" but the version found is {__version__}.\n"""
raise ImportError(
error_message
+ "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
"versions of HuggingFace Transformers.")
| 128 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : Any =logging.get_logger(__name__)
def _lowerCAmelCase (_lowerCAmelCase):
print("Loading config file...")
def flatten_yaml_as_dict(_lowerCAmelCase , _lowerCAmelCase="" , _lowerCAmelCase="."):
UpperCamelCase_ = []
for k, v in d.items():
UpperCamelCase_ = parent_key + sep + k if parent_key else k
if isinstance(_lowerCAmelCase , collections.abc.MutableMapping):
items.extend(flatten_yaml_as_dict(_lowerCAmelCase , _lowerCAmelCase , sep=_lowerCAmelCase).items())
else:
items.append((new_key, v))
return dict(_lowerCAmelCase)
UpperCamelCase_ = argparse.Namespace()
with open(_lowerCAmelCase , "r") as yaml_file:
try:
UpperCamelCase_ = yaml.load(_lowerCAmelCase , Loader=yaml.FullLoader)
UpperCamelCase_ = flatten_yaml_as_dict(_lowerCAmelCase)
for k, v in flat_cfg.items():
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
except yaml.YAMLError as exc:
logger.error("Error while loading config file: {}. Error message: {}".format(_lowerCAmelCase , str(_lowerCAmelCase)))
return config
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = MobileViTVaConfig()
UpperCamelCase_ = False
# dataset
if task_name.startswith("imagenet1k_"):
UpperCamelCase_ = 10_00
if int(task_name.strip().split("_")[-1]) == 3_84:
UpperCamelCase_ = 3_84
else:
UpperCamelCase_ = 2_56
UpperCamelCase_ = "imagenet-1k-id2label.json"
elif task_name.startswith("imagenet21k_to_1k_"):
UpperCamelCase_ = 2_10_00
if int(task_name.strip().split("_")[-1]) == 3_84:
UpperCamelCase_ = 3_84
else:
UpperCamelCase_ = 2_56
UpperCamelCase_ = "imagenet-22k-id2label.json"
elif task_name.startswith("ade20k_"):
UpperCamelCase_ = 1_51
UpperCamelCase_ = 5_12
UpperCamelCase_ = "ade20k-id2label.json"
UpperCamelCase_ = True
elif task_name.startswith("voc_"):
UpperCamelCase_ = 21
UpperCamelCase_ = 5_12
UpperCamelCase_ = "pascal-voc-id2label.json"
UpperCamelCase_ = True
# orig_config
UpperCamelCase_ = load_orig_config_file(_lowerCAmelCase)
assert getattr(_lowerCAmelCase , "model.classification.name" , -1) == "mobilevit_v2", "Invalid model"
UpperCamelCase_ = getattr(_lowerCAmelCase , "model.classification.mitv2.width_multiplier" , 1.0)
assert (
getattr(_lowerCAmelCase , "model.classification.mitv2.attn_norm_layer" , -1) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
UpperCamelCase_ = getattr(_lowerCAmelCase , "model.classification.activation.name" , "swish")
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
UpperCamelCase_ = getattr(_lowerCAmelCase , "model.segmentation.output_stride" , 16)
if "_deeplabv3" in task_name:
UpperCamelCase_ = getattr(_lowerCAmelCase , "model.segmentation.deeplabv3.aspp_rates" , [12, 24, 36])
UpperCamelCase_ = getattr(_lowerCAmelCase , "model.segmentation.deeplabv3.aspp_out_channels" , 5_12)
UpperCamelCase_ = getattr(_lowerCAmelCase , "model.segmentation.deeplabv3.aspp_dropout" , 0.1)
# id2label
UpperCamelCase_ = "huggingface/label-files"
UpperCamelCase_ = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset") , "r"))
UpperCamelCase_ = {int(_lowerCAmelCase): v for k, v in idalabel.items()}
UpperCamelCase_ = idalabel
UpperCamelCase_ = {v: k for k, v in idalabel.items()}
return config
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = dct.pop(_lowerCAmelCase)
UpperCamelCase_ = val
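# Illustration: the helper above moves a state-dict entry to a new key in
# place (pop, then re-insert under the destination key); the key names below
# are made up but follow the "conv_1." -> "conv_stem." rule used further down:
_sd = {"conv_1.weight": "w"}
_sd["mobilevitv2.conv_stem.weight"] = _sd.pop("conv_1.weight")
assert list(_sd) == ["mobilevitv2.conv_stem.weight"]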
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase=False):
if base_model:
UpperCamelCase_ = ""
else:
UpperCamelCase_ = "mobilevitv2."
UpperCamelCase_ = []
for k in state_dict.keys():
if k[:8] == "encoder.":
UpperCamelCase_ = k[8:]
else:
UpperCamelCase_ = k
if ".block." in k:
UpperCamelCase_ = k_new.replace(".block." , ".")
if ".conv." in k:
UpperCamelCase_ = k_new.replace(".conv." , ".convolution.")
if ".norm." in k:
UpperCamelCase_ = k_new.replace(".norm." , ".normalization.")
if "conv_1." in k:
UpperCamelCase_ = k_new.replace("conv_1." , f"""{model_prefix}conv_stem.""")
for i in [1, 2]:
if f"""layer_{i}.""" in k:
UpperCamelCase_ = k_new.replace(f"""layer_{i}.""" , f"""{model_prefix}encoder.layer.{i-1}.layer.""")
if ".exp_1x1." in k:
UpperCamelCase_ = k_new.replace(".exp_1x1." , ".expand_1x1.")
if ".red_1x1." in k:
UpperCamelCase_ = k_new.replace(".red_1x1." , ".reduce_1x1.")
for i in [3, 4, 5]:
if f"""layer_{i}.0.""" in k:
UpperCamelCase_ = k_new.replace(f"""layer_{i}.0.""" , f"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""")
if f"""layer_{i}.1.local_rep.0.""" in k:
UpperCamelCase_ = k_new.replace(f"""layer_{i}.1.local_rep.0.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""")
if f"""layer_{i}.1.local_rep.1.""" in k:
UpperCamelCase_ = k_new.replace(f"""layer_{i}.1.local_rep.1.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""")
for i in [3, 4, 5]:
if i == 3:
UpperCamelCase_ = [0, 1]
elif i == 4:
UpperCamelCase_ = [0, 1, 2, 3]
elif i == 5:
UpperCamelCase_ = [0, 1, 2]
for j in j_in:
if f"""layer_{i}.1.global_rep.{j}.""" in k:
UpperCamelCase_ = k_new.replace(
f"""layer_{i}.1.global_rep.{j}.""" , f"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""")
if f"""layer_{i}.1.global_rep.{j+1}.""" in k:
UpperCamelCase_ = k_new.replace(
f"""layer_{i}.1.global_rep.{j+1}.""" , f"""{model_prefix}encoder.layer.{i-1}.layernorm.""")
if f"""layer_{i}.1.conv_proj.""" in k:
UpperCamelCase_ = k_new.replace(f"""layer_{i}.1.conv_proj.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_projection.""")
if "pre_norm_attn.0." in k:
UpperCamelCase_ = k_new.replace("pre_norm_attn.0." , "layernorm_before.")
if "pre_norm_attn.1." in k:
UpperCamelCase_ = k_new.replace("pre_norm_attn.1." , "attention.")
if "pre_norm_ffn.0." in k:
UpperCamelCase_ = k_new.replace("pre_norm_ffn.0." , "layernorm_after.")
if "pre_norm_ffn.1." in k:
UpperCamelCase_ = k_new.replace("pre_norm_ffn.1." , "ffn.conv1.")
if "pre_norm_ffn.3." in k:
UpperCamelCase_ = k_new.replace("pre_norm_ffn.3." , "ffn.conv2.")
if "classifier.1." in k:
UpperCamelCase_ = k_new.replace("classifier.1." , "classifier.")
if "seg_head." in k:
UpperCamelCase_ = k_new.replace("seg_head." , "segmentation_head.")
if ".aspp_layer." in k:
UpperCamelCase_ = k_new.replace(".aspp_layer." , ".")
if ".aspp_pool." in k:
UpperCamelCase_ = k_new.replace(".aspp_pool." , ".")
rename_keys.append((k, k_new))
return rename_keys
def _lowerCAmelCase (_lowerCAmelCase):
UpperCamelCase_ = []
for k in state_dict.keys():
if k.startswith("seg_head.aux_head."):
keys_to_ignore.append(_lowerCAmelCase)
for k in keys_to_ignore:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase)
def _lowerCAmelCase ():
UpperCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
UpperCamelCase_ = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase).raw)
return im
@torch.no_grad()
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = get_mobilevitva_config(_lowerCAmelCase , _lowerCAmelCase)
# load original state_dict
UpperCamelCase_ = torch.load(_lowerCAmelCase , map_location="cpu")
# load huggingface model
if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
UpperCamelCase_ = MobileViTVaForSemanticSegmentation(_lowerCAmelCase).eval()
UpperCamelCase_ = False
else:
UpperCamelCase_ = MobileViTVaForImageClassification(_lowerCAmelCase).eval()
UpperCamelCase_ = False
    # remove and rename some keys from the loaded original model
UpperCamelCase_ = checkpoint
remove_unused_keys(_lowerCAmelCase)
UpperCamelCase_ = create_rename_keys(_lowerCAmelCase , base_model=_lowerCAmelCase)
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
# load modified state_dict
model.load_state_dict(_lowerCAmelCase)
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCamelCase_ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
UpperCamelCase_ = image_processor(images=prepare_img() , return_tensors="pt")
UpperCamelCase_ = model(**_lowerCAmelCase)
# verify classification model
if task_name.startswith("imagenet"):
UpperCamelCase_ = outputs.logits
UpperCamelCase_ = logits.argmax(-1).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx])
if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
# expected_logits for base variant
UpperCamelCase_ = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01])
assert torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1e-4)
Path(_lowerCAmelCase).mkdir(exist_ok=_lowerCAmelCase)
print(f"""Saving model {task_name} to {pytorch_dump_folder_path}""")
model.save_pretrained(_lowerCAmelCase)
print(f"""Saving image processor to {pytorch_dump_folder_path}""")
image_processor.save_pretrained(_lowerCAmelCase)
if __name__ == "__main__":
UpperCAmelCase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
"""
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
"""
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCAmelCase : Any =parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 128 | 1 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
A__ : List[str] = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
A__ : Optional[int] = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
A__ : Dict = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
def A_ ( self : str ) -> Tuple:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
def A_ ( self : str , __a : List[str] , __a : Optional[Any] ) -> int:
'''simple docstring'''
__snake_case : Optional[int] = 0.0
for i, j in zip(__a , __a ):
n_correct += 1.0 if math_equivalence.is_equiv(__a , __a ) else 0.0
__snake_case : Optional[Any] = n_correct / len(__a )
return {
"accuracy": accuracy,
}
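# Illustration (comment only): the canonicalisation inside
# math_equivalence.is_equiv is what makes answers such as "1/2" and
# "\frac{1}{2}" compare equal, so the accuracy above is insensitive to the
# particular LaTeX form of an answer.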
| 364 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A__ : int = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
A__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
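# Illustration (comment only): with _LazyModule, importing this package is
# cheap -- the submodules named in _import_structure are loaded only when one
# of their attributes (e.g. GroupViTConfig) is first accessed.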
| 0 | 0 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Dict ) -> Dict:
__SCREAMING_SNAKE_CASE : Any = 10
def __magic_name__( self :Optional[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Tuple = [1, 2, 3, 4]
__SCREAMING_SNAKE_CASE : str = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowerCAmelCase__ , self.block_size , 0 ) , lowerCAmelCase__ )
def __magic_name__( self :int ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
__SCREAMING_SNAKE_CASE : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowerCAmelCase__ , self.block_size , 0 ) , lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
__SCREAMING_SNAKE_CASE : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowerCAmelCase__ , self.block_size , 0 ) , lowerCAmelCase__ )
def __magic_name__( self :Optional[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Any = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = process_story(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , [] )
def __magic_name__( self :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[str] = ''''''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = process_story(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , [] )
self.assertEqual(lowerCAmelCase__ , [] )
def __magic_name__( self :Dict ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[str] = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = process_story(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''It was the best of times.''']
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __magic_name__( self :Tuple ) -> str:
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([1, 2, 3, 4] )
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowerCAmelCase__ , 0 ).numpy() , expected.numpy() )
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
__SCREAMING_SNAKE_CASE : int = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowerCAmelCase__ , 23 ).numpy() , expected.numpy() )
def __magic_name__( self :str ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowerCAmelCase__ , 1 ).numpy() , expected.numpy() )
def __magic_name__( self :str ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : int = 101
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
__SCREAMING_SNAKE_CASE : str = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__SCREAMING_SNAKE_CASE : Tuple = compute_token_type_ids(lowerCAmelCase__ , lowerCAmelCase__ )
np.testing.assert_array_equal(lowerCAmelCase__ , lowerCAmelCase__ )
| 9 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : str = '''ylacombe/bark-small'''
__SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : str = '''en_speaker_1'''
__SCREAMING_SNAKE_CASE : Any = '''This is a test string'''
__SCREAMING_SNAKE_CASE : int = '''speaker_embeddings_path.json'''
__SCREAMING_SNAKE_CASE : int = '''speaker_embeddings'''
def __magic_name__( self :List[str] , **lowerCAmelCase__ :Union[str, Any] ) -> Any:
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCAmelCase__ )
def __magic_name__( self :List[str] ) -> int:
shutil.rmtree(self.tmpdirname )
def __magic_name__( self :Dict ) -> str:
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Tuple = BarkProcessor(tokenizer=lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Optional[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __magic_name__( self :Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __magic_name__( self :List[str] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__SCREAMING_SNAKE_CASE : str = 35
__SCREAMING_SNAKE_CASE : str = 2
__SCREAMING_SNAKE_CASE : List[Any] = 8
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''semantic_prompt''': np.ones(lowerCAmelCase__ ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(lowerCAmelCase__ , **lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = processor(text=self.input_string , voice_preset=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def __magic_name__( self :Tuple ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Any = BarkProcessor(tokenizer=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = processor(text=self.input_string )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 9 | 1 |
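The processor test above builds a voice-preset dict and round-trips it through an .npz file. A minimal self-contained sketch of that structure and round trip, assuming only NumPy (the shape constants mirror the test and are illustrative, not Bark's real dimensions):

import os
import tempfile

import numpy as np

# Shape constants mirroring the test above (illustrative values).
seq_len = 35
nb_codebooks_coarse = 2
nb_codebooks_total = 8

voice_preset = {
    "semantic_prompt": np.ones(seq_len),                       # 1-D token history
    "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),  # coarse codebooks x time
    "fine_prompt": np.ones((nb_codebooks_total, seq_len)),     # fine codebooks x time
}

# Round-trip through an .npz file, as the test does with np.savez.
path = os.path.join(tempfile.mkdtemp(), "file.npz")
np.savez(path, **voice_preset)
loaded = np.load(path)
for key, value in voice_preset.items():
    assert loaded[key].tolist() == value.tolist()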
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("""4.31.0""")
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : str =field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a_ : bool =field(
default=UpperCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
a_ : str =field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a_ : bool =field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[str] =field(default=UpperCAmelCase_ , metadata={"""help""": """The input training data file (a text file)."""} )
a_ : Optional[str] =field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
a_ : bool =field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
a_ : Optional[int] =field(
default=UpperCAmelCase_ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
a_ : Optional[int] =field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a_ : bool =field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
a_ : Optional[int] =field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a_ : Optional[int] =field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
if self.train_file is not None:
_snake_case : Optional[int] = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_snake_case : Optional[Any] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : PreTrainedTokenizerBase
a_ : Union[bool, str, PaddingStrategy] =True
a_ : Optional[int] =None
a_ : Optional[int] =None
def __call__( self : Optional[int] , UpperCamelCase : List[Any] ):
'''simple docstring'''
_snake_case : str = 'label' if 'label' in features[0].keys() else 'labels'
_snake_case : List[Any] = [feature.pop(UpperCamelCase ) for feature in features]
_snake_case : Any = len(UpperCamelCase )
_snake_case : Tuple = len(features[0]['input_ids'] )
_snake_case : List[Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(UpperCamelCase )] for feature in features
]
_snake_case : Optional[Any] = list(chain(*UpperCamelCase ) )
_snake_case : Tuple = self.tokenizer.pad(
UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
_snake_case : Tuple = {k: v.view(UpperCamelCase , UpperCamelCase , -1 ) for k, v in batch.items()}
# Add back labels
        _snake_case : List[Any] = torch.tensor(UpperCamelCase , dtype=torch.int64 )  # labels must be int64 for the loss
return batch
def lowerCamelCase_ ( )-> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_snake_case : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_snake_case : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_snake_case : Optional[int] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , lowerCAmelCase , lowerCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_snake_case : Tuple = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase )
datasets.utils.logging.set_verbosity(lowerCAmelCase )
transformers.utils.logging.set_verbosity(lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_snake_case : Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_snake_case : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_snake_case : List[str] = {}
if data_args.train_file is not None:
_snake_case : List[Any] = data_args.train_file
if data_args.validation_file is not None:
_snake_case : int = data_args.validation_file
_snake_case : int = data_args.train_file.split('.' )[-1]
_snake_case : List[str] = load_dataset(
lowerCAmelCase , data_files=lowerCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_snake_case : Tuple = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case : Optional[int] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_snake_case : Union[str, Any] = [F"""ending{i}""" for i in range(4 )]
_snake_case : Dict = 'sent1'
_snake_case : Optional[int] = 'sent2'
if data_args.max_seq_length is None:
_snake_case : Any = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
_snake_case : Any = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
_snake_case : List[str] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCAmelCase: Optional[int] ):
_snake_case : List[Any] = [[context] * 4 for context in examples[context_name]]
_snake_case : Any = examples[question_header_name]
_snake_case : Tuple = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(lowerCAmelCase )
]
# Flatten out
_snake_case : Dict = list(chain(*lowerCAmelCase ) )
_snake_case : Optional[Any] = list(chain(*lowerCAmelCase ) )
# Tokenize
_snake_case : Tuple = tokenizer(
lowerCAmelCase , lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_snake_case : str = raw_datasets['train']
if data_args.max_train_samples is not None:
_snake_case : Union[str, Any] = min(len(lowerCAmelCase ) , data_args.max_train_samples )
_snake_case : Any = train_dataset.select(range(lowerCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
_snake_case : Optional[int] = train_dataset.map(
lowerCAmelCase , batched=lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_snake_case : str = raw_datasets['validation']
if data_args.max_eval_samples is not None:
_snake_case : str = min(len(lowerCAmelCase ) , data_args.max_eval_samples )
_snake_case : Optional[int] = eval_dataset.select(range(lowerCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
_snake_case : Optional[int] = eval_dataset.map(
lowerCAmelCase , batched=lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
_snake_case : int = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=lowerCAmelCase , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(lowerCAmelCase: Tuple ):
_snake_case : List[str] = eval_predictions
_snake_case : Optional[int] = np.argmax(lowerCAmelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_snake_case : str = Trainer(
model=lowerCAmelCase , args=lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCAmelCase , data_collator=lowerCAmelCase , compute_metrics=lowerCAmelCase , )
# Training
if training_args.do_train:
_snake_case : Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
_snake_case : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_snake_case : List[str] = last_checkpoint
_snake_case : List[Any] = trainer.train(resume_from_checkpoint=lowerCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_snake_case : Optional[Any] = train_result.metrics
_snake_case : List[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase )
)
_snake_case : Tuple = min(lowerCAmelCase , len(lowerCAmelCase ) )
trainer.log_metrics('train' , lowerCAmelCase )
trainer.save_metrics('train' , lowerCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_snake_case : Tuple = trainer.evaluate()
_snake_case : Union[str, Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase )
_snake_case : str = min(lowerCAmelCase , len(lowerCAmelCase ) )
trainer.log_metrics('eval' , lowerCAmelCase )
trainer.save_metrics('eval' , lowerCAmelCase )
_snake_case : str = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase )
else:
trainer.create_model_card(**lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: str )-> int:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 353 |
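The DataCollatorForMultipleChoice above hinges on a flatten/pad/un-flatten pattern: each example carries num_choices candidate sequences, which are flattened into one batch for padding and then reshaped back to (batch, num_choices, seq_len). A minimal sketch of that reshaping with plain PyTorch, no tokenizer involved:

from itertools import chain

import torch

batch_size, num_choices = 2, 4
# Each example holds `num_choices` variable-length candidate sequences.
features = [
    [[1, 2, 3], [1, 2], [1, 2, 3, 4], [1]],
    [[5], [5, 6], [5, 6, 7], [5, 6, 7, 8]],
]

# Flatten: (batch, choices, variable len) -> (batch * choices) sequences.
flat = list(chain(*features))

# Pad to the longest sequence in the flattened batch (0 as a stand-in pad id).
max_len = max(len(seq) for seq in flat)
padded = torch.tensor([seq + [0] * (max_len - len(seq)) for seq in flat])

# Un-flatten back to (batch, choices, max_len), as the collator's .view(...) does.
input_ids = padded.view(batch_size, num_choices, -1)
assert input_ids.shape == (2, 4, 4)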
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[str] ="""fnet"""
def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any]=3_20_00 , UpperCamelCase : Any=7_68 , UpperCamelCase : Tuple=12 , UpperCamelCase : Union[str, Any]=30_72 , UpperCamelCase : int="gelu_new" , UpperCamelCase : str=0.1 , UpperCamelCase : Optional[Any]=5_12 , UpperCamelCase : List[str]=4 , UpperCamelCase : Union[str, Any]=0.02 , UpperCamelCase : Union[str, Any]=1e-1_2 , UpperCamelCase : List[str]=False , UpperCamelCase : Optional[int]=5_12 , UpperCamelCase : Any=3 , UpperCamelCase : str=1 , UpperCamelCase : Optional[Any]=2 , **UpperCamelCase : List[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
_snake_case : Optional[int] = vocab_size
_snake_case : int = max_position_embeddings
_snake_case : Dict = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : List[str] = intermediate_size
_snake_case : Union[str, Any] = hidden_act
_snake_case : Optional[Any] = hidden_dropout_prob
_snake_case : List[Any] = initializer_range
_snake_case : int = type_vocab_size
_snake_case : Union[str, Any] = layer_norm_eps
_snake_case : str = use_tpu_fourier_optimizations
_snake_case : Tuple = tpu_short_seq_length
| 260 | 0 |
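Configs like the one above are normally exercised through save_pretrained/from_pretrained. A short sketch, assuming a transformers release that ships FNet:

from transformers import FNetConfig

# Unspecified fields keep the defaults from __init__ above (e.g. hidden_act="gelu_new").
config = FNetConfig(vocab_size=32000, hidden_size=768, num_hidden_layers=12)
print(config.model_type)   # "fnet"

# Configs serialize to/from JSON, which is what the config.json URLs above point at.
config.save_pretrained("./fnet-config")
reloaded = FNetConfig.from_pretrained("./fnet-config")
assert reloaded.to_dict() == config.to_dict()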
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][j] ) , end='''\t''' )
else:
print('''INF''' , end='''\t''' )
print()
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : str = [[float('''inf''' ) for _ in range(__lowerCamelCase )] for _ in range(__lowerCamelCase )]
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
lowercase__ : List[str] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(__lowerCamelCase ):
# looping through rows of graph array
for i in range(__lowerCamelCase ):
# looping through columns of graph array
for j in range(__lowerCamelCase ):
if (
dist[i][k] != float('''inf''' )
and dist[k][j] != float('''inf''' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
lowercase__ : str = dist[i][k] + dist[k][j]
_print_dist(__lowerCamelCase , __lowerCamelCase )
return dist, v
if __name__ == "__main__":
lowerCAmelCase_ = int(input('Enter number of vertices: '))
lowerCAmelCase_ = int(input('Enter number of edges: '))
lowerCAmelCase_ = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
lowerCAmelCase_ = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
lowerCAmelCase_ = int(input('Enter source:'))
lowerCAmelCase_ = int(input('Enter destination:'))
lowerCAmelCase_ = float(input('Enter weight:'))
lowerCAmelCase_ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 16 |
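The transcript in the comments above walks a 3-vertex example by hand. The same example as a self-contained, de-obfuscated check, so this sketch runs on its own:

INF = float("inf")

def floyd_warshall_min(graph):
    # dist[i][j] starts as the direct edge weight; each vertex k is tried as an
    # intermediate stop, keeping whichever route is shorter.
    n = len(graph)
    dist = [row[:] for row in graph]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
    return dist

# Vertices 0..2 with edges 1 -> 2 (weight 2) and 2 -> 1 (weight 1),
# the same graph as the interactive transcript above.
graph = [
    [0, INF, INF],
    [INF, 0, 2],
    [INF, 1, 0],
]
assert floyd_warshall_min(graph) == [
    [0, INF, INF],
    [INF, 0, 2],
    [INF, 1, 0],
]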
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 16 ) -> Optional[Any]:
lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ : int = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : str = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCamelCase , max_length=__lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : str = datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : Optional[int] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : List[Any] = 8
else:
lowercase__ : int = None
return tokenizer.pad(
__lowerCamelCase , padding='''longest''' , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ : List[Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase__ : str = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase_ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> str:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCamelCase ) == "1":
lowercase__ : List[Any] = 2
# Initialize accelerator
lowercase__ : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : str = config['''lr''']
lowercase__ : str = int(config['''num_epochs'''] )
lowercase__ : Optional[int] = int(config['''seed'''] )
lowercase__ : Tuple = int(config['''batch_size'''] )
lowercase__ : List[Any] = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__lowerCamelCase )
def inner_training_loop(__lowerCamelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : Tuple = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : List[str] = AdamW(params=model.parameters() , lr=__lowerCamelCase )
lowercase__ , lowercase__ : List[Any] = get_dataloaders(__lowerCamelCase , __lowerCamelCase )
# Instantiate scheduler
lowercase__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=__lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[int] = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Now we train the model
for epoch in range(__lowerCamelCase ):
model.train()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : Dict = model(**__lowerCamelCase )
lowercase__ : List[Any] = outputs.loss
accelerator.backward(__lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : Tuple = model(**__lowerCamelCase )
lowercase__ : Any = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : int = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__lowerCamelCase , references=__lowerCamelCase , )
lowercase__ : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=__lowerCamelCase , default=__lowerCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase__ : int = parser.parse_args()
lowercase__ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
| 16 | 1 |
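The core of the script above is the @find_executable_batch_size decorator: it calls the wrapped function with the current batch size, and on an out-of-memory style failure halves the batch size and retries. A CPU-only sketch; the exact error string matters, since the decorator matches CUDA OOM-style messages:

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # Pretend anything above 32 exhausts GPU memory; the decorator catches
    # this message, halves batch_size, and calls the function again.
    if batch_size > 32:
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(train())  # tries 128 -> 64 -> 32 and prints 32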
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _lowerCAmelCase ( UpperCAmelCase : str , UpperCAmelCase : Tuple=False ):
'''simple docstring'''
try:
UpperCamelCase__ : Dict =os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase__ : Optional[int] =default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase__ : str =strtobool(UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'''If set, {key} must be yes or no.''' )
return _value
_SCREAMING_SNAKE_CASE : Optional[Any] = parse_flag_from_env("""RUN_SLOW""", default=False)
_SCREAMING_SNAKE_CASE : Optional[int] = parse_flag_from_env("""RUN_REMOTE""", default=False)
_SCREAMING_SNAKE_CASE : Dict = parse_flag_from_env("""RUN_LOCAL""", default=True)
_SCREAMING_SNAKE_CASE : Dict = parse_flag_from_env("""RUN_PACKAGED""", default=True)
# Compression
_SCREAMING_SNAKE_CASE : List[Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
_SCREAMING_SNAKE_CASE : Optional[Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
_SCREAMING_SNAKE_CASE : List[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
_SCREAMING_SNAKE_CASE : Dict = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
_SCREAMING_SNAKE_CASE : Optional[Any] = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
_SCREAMING_SNAKE_CASE : Optional[Any] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("""0.3.2"""),
reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
_SCREAMING_SNAKE_CASE : str = pytest.mark.skipif(
sys.platform == """win32""",
reason="""test should not be run on Windows""",
)
def _lowerCAmelCase ( UpperCAmelCase : Dict ):
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
UpperCamelCase__ : Tuple =unittest.skip('''test requires faiss''' )(UpperCAmelCase )
return test_case
def _lowerCAmelCase ( UpperCAmelCase : Tuple ):
'''simple docstring'''
try:
import regex # noqa
except ImportError:
UpperCamelCase__ : List[str] =unittest.skip('''test requires regex''' )(UpperCAmelCase )
return test_case
def _lowerCAmelCase ( UpperCAmelCase : List[str] ):
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
UpperCamelCase__ : Optional[int] =unittest.skip('''test requires elasticsearch''' )(UpperCAmelCase )
return test_case
def _lowerCAmelCase ( UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
UpperCamelCase__ : Optional[int] =unittest.skip('''test requires sqlalchemy''' )(UpperCAmelCase )
return test_case
def _lowerCAmelCase ( UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
if not config.TORCH_AVAILABLE:
UpperCamelCase__ : int =unittest.skip('''test requires PyTorch''' )(UpperCAmelCase )
return test_case
def _lowerCAmelCase ( UpperCAmelCase : Optional[int] ):
'''simple docstring'''
if not config.TF_AVAILABLE:
UpperCamelCase__ : Any =unittest.skip('''test requires TensorFlow''' )(UpperCAmelCase )
return test_case
def _lowerCAmelCase ( UpperCAmelCase : List[str] ):
'''simple docstring'''
if not config.JAX_AVAILABLE:
UpperCamelCase__ : List[str] =unittest.skip('''test requires JAX''' )(UpperCAmelCase )
return test_case
def _lowerCAmelCase ( UpperCAmelCase : Dict ):
'''simple docstring'''
if not config.PIL_AVAILABLE:
UpperCamelCase__ : List[str] =unittest.skip('''test requires Pillow''' )(UpperCAmelCase )
return test_case
def _lowerCAmelCase ( UpperCAmelCase : Any ):
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(UpperCAmelCase )
else:
return test_case
def _lowerCAmelCase ( UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(UpperCAmelCase )
else:
return test_case
def _lowerCAmelCase ( UpperCAmelCase : List[Any] ):
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(UpperCAmelCase )
else:
return test_case
def _lowerCAmelCase ( UpperCAmelCase : str ):
'''simple docstring'''
def _require_spacy_model(UpperCAmelCase : Optional[Any] ):
try:
import spacy # noqa F401
spacy.load(UpperCAmelCase )
except ImportError:
return unittest.skip('''test requires spacy''' )(UpperCAmelCase )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(UpperCAmelCase ) )(UpperCAmelCase )
else:
return test_case
return _require_spacy_model
def _lowerCAmelCase ( UpperCAmelCase : Any ):
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(UpperCAmelCase )
else:
return test_case
def _lowerCAmelCase ( UpperCAmelCase : Any ):
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(UpperCAmelCase )
else:
return test_case
def _lowerCAmelCase ( UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
UpperCamelCase__ : List[Any] =unittest.skip('''test is slow''' )(UpperCAmelCase )
return test_case
def _lowerCAmelCase ( UpperCAmelCase : int ):
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
UpperCamelCase__ : int =unittest.skip('''test is local''' )(UpperCAmelCase )
return test_case
def _lowerCAmelCase ( UpperCAmelCase : List[str] ):
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
UpperCamelCase__ : int =unittest.skip('''test is packaged''' )(UpperCAmelCase )
return test_case
def _lowerCAmelCase ( UpperCAmelCase : Optional[int] ):
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
UpperCamelCase__ : int =unittest.skip('''test requires remote''' )(UpperCAmelCase )
return test_case
def _lowerCAmelCase ( *UpperCAmelCase : Dict ):
'''simple docstring'''
def decorate(cls : str ):
for name, fn in cls.__dict__.items():
if callable(UpperCAmelCase ) and name.startswith('''test''' ):
for decorator in decorators:
UpperCamelCase__ : Optional[Any] =decorator(UpperCAmelCase )
setattr(cls , UpperCAmelCase , UpperCAmelCase )
return cls
return decorate
class __a ( snake_case__ ):
"""simple docstring"""
pass
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
@contextmanager
def _lowerCAmelCase ( UpperCAmelCase : str=OfflineSimulationMode.CONNECTION_FAILS , UpperCAmelCase : Dict=1E-16 ):
'''simple docstring'''
UpperCamelCase__ : Any =requests.Session().request
def timeout_request(UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , **UpperCAmelCase : int ):
# Change the url to an invalid url so that the connection hangs
UpperCamelCase__ : Dict ='''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
UpperCamelCase__ : Dict =timeout
try:
return online_request(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
UpperCamelCase__ : Optional[Any] =url
UpperCamelCase__ : Union[str, Any] =e.args[0]
UpperCamelCase__ : Optional[int] =(max_retry_error.args[0].replace('''10.255.255.1''' , F'''OfflineMock[{url}]''' ),)
UpperCamelCase__ : List[str] =(max_retry_error,)
raise
def raise_connection_error(UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Tuple ):
raise requests.ConnectionError('''Offline mode is enabled.''' , request=UpperCAmelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' , UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' , UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' , UpperCAmelCase ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def _lowerCAmelCase ( *UpperCAmelCase : str , **UpperCAmelCase : str ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] =str(Path().resolve() )
with tempfile.TemporaryDirectory(*UpperCAmelCase , **UpperCAmelCase ) as tmp_dir:
try:
os.chdir(UpperCAmelCase )
yield
finally:
os.chdir(UpperCAmelCase )
@contextmanager
def _lowerCAmelCase ( ):
'''simple docstring'''
import gc
gc.collect()
UpperCamelCase__ : Union[str, Any] =pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _lowerCAmelCase ( ):
'''simple docstring'''
import gc
gc.collect()
UpperCamelCase__ : Tuple =pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _lowerCAmelCase ( UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ):
'''simple docstring'''
return deepcopy(UpperCAmelCase ).integers(0 , 100 , 10 ).tolist() == deepcopy(UpperCAmelCase ).integers(0 , 100 , 10 ).tolist()
def _lowerCAmelCase ( UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(UpperCAmelCase : Dict , *UpperCAmelCase : Dict , **UpperCAmelCase : Tuple ):
try:
return func(*UpperCAmelCase , **UpperCAmelCase )
except HTTPError as err:
if str(UpperCAmelCase ).startswith('''500''' ) or str(UpperCAmelCase ).startswith('''502''' ):
pytest.xfail(str(UpperCAmelCase ) )
raise err
return decorator.decorator(_wrapper , UpperCAmelCase )
class __a :
"""simple docstring"""
def __init__( self : List[str] , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Tuple ):
UpperCamelCase__ : Optional[Any] =returncode
UpperCamelCase__ : Optional[int] =stdout
UpperCamelCase__ : Any =stderr
async def _lowerCAmelCase ( UpperCAmelCase : Any , UpperCAmelCase : List[str] ):
'''simple docstring'''
while True:
UpperCamelCase__ : List[Any] =await stream.readline()
if line:
callback(UpperCAmelCase )
else:
break
async def _lowerCAmelCase ( UpperCAmelCase : int , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : int=None , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : Dict=False ):
'''simple docstring'''
if echo:
print('''\nRunning: ''' , ''' '''.join(UpperCAmelCase ) )
UpperCamelCase__ : List[str] =await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase__ : List[Any] =[]
UpperCamelCase__ : Dict =[]
def tee(UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int]="" ):
UpperCamelCase__ : Tuple =line.decode('''utf-8''' ).rstrip()
sink.append(UpperCAmelCase )
if not quiet:
print(UpperCAmelCase , UpperCAmelCase , file=UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda UpperCAmelCase : tee(UpperCAmelCase , UpperCAmelCase , sys.stdout , label='''stdout:''' ) ),
_read_stream(p.stderr , lambda UpperCAmelCase : tee(UpperCAmelCase , UpperCAmelCase , sys.stderr , label='''stderr:''' ) ),
] , timeout=UpperCAmelCase , )
return _RunOutput(await p.wait() , UpperCAmelCase , UpperCAmelCase )
def _lowerCAmelCase ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[Any]=180 , UpperCAmelCase : List[str]=False , UpperCAmelCase : Tuple=True ):
'''simple docstring'''
UpperCamelCase__ : Tuple =asyncio.get_event_loop()
UpperCamelCase__ : List[str] =loop.run_until_complete(
_stream_subprocess(UpperCAmelCase , env=UpperCAmelCase , stdin=UpperCAmelCase , timeout=UpperCAmelCase , quiet=UpperCAmelCase , echo=UpperCAmelCase ) )
UpperCamelCase__ : int =''' '''.join(UpperCAmelCase )
if result.returncode > 0:
UpperCamelCase__ : Dict ='''\n'''.join(result.stderr )
raise RuntimeError(
F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
F'''The combined stderr from workers follows:\n{stderr}''' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F'''\'{cmd_str}\' produced no output.''' )
return result
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCamelCase__ : str =os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
UpperCamelCase__ : Tuple =re.sub(r'''^gw''' , '''''' , UpperCAmelCase , 0 , re.M )
return int(UpperCAmelCase )
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCamelCase__ : str =29_500
UpperCamelCase__ : Optional[int] =pytest_xdist_worker_id()
return port + uniq_delta
| 157 |
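Nearly every helper above follows the same shape: probe for an optional dependency or an environment flag, and wrap the test in unittest.skip when it is missing. A distilled sketch of that pattern (names are illustrative, not the datasets API itself):

import os
import unittest
from distutils.util import strtobool

def parse_flag_from_env(key, default=False):
    # Absent -> default; present -> must be a yes/no style value.
    value = os.environ.get(key)
    if value is None:
        return default
    try:
        return bool(strtobool(value))
    except ValueError:
        raise ValueError(f"If set, {key} must be yes or no.")

_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)

def slow(test_case):
    # Decorator: skip the test unless RUN_SLOW is enabled in the environment.
    if not _run_slow_tests:
        return unittest.skip("test is slow")(test_case)
    return test_case

class ExampleTest(unittest.TestCase):
    @slow
    def test_expensive(self):
        self.assertTrue(True)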
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __a :
"""simple docstring"""
def __init__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Any=13 , lowercase_ : Union[str, Any]=7 , lowercase_ : int=True , lowercase_ : List[str]=True , lowercase_ : int=True , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=99 , lowercase_ : int=32 , lowercase_ : List[Any]=2 , lowercase_ : Optional[int]=4 , lowercase_ : Dict=37 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : Dict=16 , lowercase_ : Optional[int]=2 , lowercase_ : Optional[int]=0.0_2 , lowercase_ : Dict=3 , lowercase_ : Optional[int]=4 , lowercase_ : Any=None , ):
UpperCamelCase__ : Any =parent
UpperCamelCase__ : Any =13
UpperCamelCase__ : int =7
UpperCamelCase__ : Tuple =True
UpperCamelCase__ : Dict =True
UpperCamelCase__ : int =True
UpperCamelCase__ : Tuple =True
UpperCamelCase__ : Any =99
UpperCamelCase__ : Any =32
UpperCamelCase__ : Union[str, Any] =2
UpperCamelCase__ : List[Any] =4
UpperCamelCase__ : Any =37
UpperCamelCase__ : Union[str, Any] ='''gelu'''
UpperCamelCase__ : Dict =0.1
UpperCamelCase__ : int =0.1
UpperCamelCase__ : Union[str, Any] =512
UpperCamelCase__ : Dict =16
UpperCamelCase__ : List[Any] =2
UpperCamelCase__ : str =0.0_2
UpperCamelCase__ : Optional[Any] =3
UpperCamelCase__ : List[str] =4
UpperCamelCase__ : Optional[int] =None
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ : Any =None
if self.use_input_mask:
UpperCamelCase__ : List[Any] =random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ : List[Any] =None
if self.use_token_type_ids:
UpperCamelCase__ : int =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ : str =None
UpperCamelCase__ : Union[str, Any] =None
UpperCamelCase__ : str =None
if self.use_labels:
UpperCamelCase__ : Optional[int] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ : int =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowercase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : Any , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : int ):
UpperCamelCase__ : str =TFRoFormerModel(config=lowercase_ )
UpperCamelCase__ : List[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCamelCase__ : Dict =[input_ids, input_mask]
UpperCamelCase__ : Tuple =model(lowercase_ )
UpperCamelCase__ : str =model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : List[Any] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : int ):
UpperCamelCase__ : Optional[Any] =True
UpperCamelCase__ : List[Any] =TFRoFormerForCausalLM(config=lowercase_ )
UpperCamelCase__ : Optional[Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : Any =model(lowercase_ )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _lowerCAmelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : List[Any] ):
UpperCamelCase__ : str =TFRoFormerForMaskedLM(config=lowercase_ )
UpperCamelCase__ : int ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : Optional[int] =model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : int ):
UpperCamelCase__ : Tuple =self.num_labels
UpperCamelCase__ : List[str] =TFRoFormerForSequenceClassification(config=lowercase_ )
UpperCamelCase__ : Optional[int] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : Optional[Any] =model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : List[Any] , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] ):
UpperCamelCase__ : Tuple =self.num_choices
UpperCamelCase__ : Tuple =TFRoFormerForMultipleChoice(config=lowercase_ )
UpperCamelCase__ : Optional[int] =tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ : int =tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ : List[str] =tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ : int ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCamelCase__ : Tuple =model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self : Dict , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Tuple ):
UpperCamelCase__ : Optional[int] =self.num_labels
UpperCamelCase__ : List[str] =TFRoFormerForTokenClassification(config=lowercase_ )
UpperCamelCase__ : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : int =model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : str , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str ):
UpperCamelCase__ : Dict =TFRoFormerForQuestionAnswering(config=lowercase_ )
UpperCamelCase__ : Optional[Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCamelCase__ : List[str] =model(lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self : Optional[int] ):
        config_and_inputs = self.prepare_config_and_inputs()
        # a tuple target cannot carry a type annotation, so the unpack is written plainly
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
UpperCamelCase__ : Any ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __a ( snake_case__, snake_case__, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def _lowerCAmelCase ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : int , lowercase_ : Tuple , lowercase_ : int ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : List[Any] =TFRoFormerModelTester(self )
UpperCamelCase__ : Any =ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def _lowerCAmelCase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : int ):
UpperCamelCase__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def _lowerCAmelCase ( self : Optional[int] ):
UpperCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*lowercase_ )
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase_ )
def _lowerCAmelCase ( self : str ):
UpperCamelCase__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@slow
def _lowerCAmelCase ( self : str ):
UpperCamelCase__ : Optional[Any] =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(lowercase_ )
@require_tf
class __a ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : List[str] ):
UpperCamelCase__ : List[str] =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
UpperCamelCase__ : List[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCamelCase__ : Any =model(lowercase_ )[0]
# TODO Replace vocab size
UpperCamelCase__ : Union[str, Any] =5_0000
UpperCamelCase__ : Optional[Any] =[1, 6, vocab_size]
self.assertEqual(output.shape , lowercase_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
UpperCamelCase__ : Optional[Any] =tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-4 )
@require_tf
class __a ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1e-4
def _lowerCAmelCase ( self : Any ):
UpperCamelCase__ : str =tf.constant([[4, 10]] )
UpperCamelCase__ : Dict =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
UpperCamelCase__ : Any =emba(input_ids.shape )
UpperCamelCase__ : Union[str, Any] =tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
tf.debugging.assert_near(lowercase_ , lowercase_ , atol=self.tolerance )
def _lowerCAmelCase ( self : List[str] ):
UpperCamelCase__ : Dict =tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
UpperCamelCase__ : int =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
UpperCamelCase__ : Optional[int] =emba.weight[:3, :5]
tf.debugging.assert_near(lowercase_ , lowercase_ , atol=self.tolerance )
@require_tf
class __a ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1e-4
def _lowerCAmelCase ( self : str ):
# 2,12,16,64
        UpperCamelCase__ : Optional[int] =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        UpperCamelCase__ : Optional[int] =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
UpperCamelCase__ : Optional[Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
UpperCamelCase__ : Union[str, Any] =embed_positions([2, 16, 768] )[None, None, :, :]
UpperCamelCase__ , UpperCamelCase__ : Optional[int] =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
lowercase_ , lowercase_ , lowercase_ )
UpperCamelCase__ : Optional[int] =tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
UpperCamelCase__ : List[str] =tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , lowercase_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , lowercase_ , atol=self.tolerance )
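
# The rotary update verified above, written out in plain NumPy: each
# interleaved (even, odd) feature pair is rotated by the angle
# pos * 10000**(-2*i/d). A sketch under that assumption (helper name is
# illustrative, not from the original file). For example, at pos=1 the first
# query pair (0.64, 0.65) maps to
# (0.64*cos(1) - 0.65*sin(1), 0.65*cos(1) + 0.64*sin(1)) = (-0.2012, 0.8897),
# matching the expected slice.
def _reference_rotary(x, position):
    import numpy as np

    dim = x.shape[-1]
    angles = position * (10000.0 ** (-np.arange(0, dim, 2) / dim))
    sin, cos = np.sin(angles), np.cos(angles)
    out = np.empty_like(x)
    out[..., 0::2] = x[..., 0::2] * cos - x[..., 1::2] * sin
    out[..., 1::2] = x[..., 1::2] * cos + x[..., 0::2] * sin
    return out
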
| 157 | 1 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # even though dataset_name is deprecated, it is still part of the dict dump
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
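

# Round-trip sketch of the helpers exercised above (uses only names already
# imported in this file; the byte/example counts are illustrative):
def _split_dict_round_trip_demo():
    splits = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
    yaml_list = splits._to_yaml_list()  # e.g. [{"name": "train", "num_bytes": 1337, "num_examples": 42}]
    assert SplitDict._from_yaml_list(yaml_list)["train"].num_examples == 42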
| 22 |
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join([separator.join([char.upper() for char in sub_str]) for sub_str in string_split])
        else:
            res_str = "".join([separator.join([char.lower() for char in sub_str]) for sub_str in string_split])
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
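

# A small self-check of the helpers above; the expected strings are derived
# from the definitions in this file, not taken from an external source.
def _demo_conversions() -> None:
    assert to_camel_case("This is a string") == "thisIsAString"
    assert to_snake_case("This is a string", upper=False) == "this_is_a_string"
    assert to_kebab_case("This is a string", upper=True) == "THIS-IS-A-STRING"
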
if __name__ == "__main__":
__import__('doctest').testmod() | 152 | 0 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
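
        # How the expected pieces arise (derived from the merges in setUp):
        # "lower" -> l o w e r</w> --(l o)--> lo w e r</w> --(e r</w>)--> lo w er</w>,
        # i.e. ["lo", "w", "er</w>"]; "newer" only matches (e r</w>), giving
        # ["n", "e", "w", "er</w>"].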
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Check that an error is raised when the user tries to load a tokenizer saved
        # in the pre-v4.17.0 format.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lowercases letters, so the common test does not apply here
pass | 366 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit

    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
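

# Quick sanity sketch of the two helpers above (shapes are illustrative):
#   ids_tensor((2, 5), vocab_size=99)     -> (2, 5) int32 array, values in [0, 98]
#   random_attention_mask((2, 5))[:, -1]  -> all ones, so every row attends somewhere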
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs) | 340 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : int = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
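
# Usage sketch (values are illustrative, not extra defaults): the cutoffs split
# the vocabulary into a frequent shortlist plus tail clusters, and with
# div_val > 1 each successive cluster uses embeddings of size d_embed // div_val**i.
#
#     cfg = TransfoXLConfig(cutoffs=[20000, 40000, 200000], div_val=4)
#     cfg.cutoffs == [20000, 40000, 200000]  # the final vocab-size bound is
#                                            # appended later by the adaptive softmax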
| 218 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 218 | 1 |
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
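
        # This is the standard KV-cache equivalence check: running the full
        # (prefix + new tokens) sequence in one pass must match running only the
        # new tokens with `past_key_values` computed from the prefix, up to the
        # 1e-3 float tolerance used above.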
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        *args,
    ):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
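

# The encoder/decoder pair tested above is normally composed through
# transformers' EncoderDecoderModel; a minimal sketch (the checkpoint is the
# one used in these tests, and both sides may share it):
#
#     from transformers import EncoderDecoderModel
#     model = EncoderDecoderModel.from_encoder_decoder_pretrained(
#         "google/bert_for_seq_generation_L-24_bbc_encoder",
#         "google/bert_for_seq_generation_L-24_bbc_encoder",
#     )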
| 362 |
"""simple docstring"""
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        """
        hidden: [len*bsz, d_proj]; labels: [len*bsz]. With labels, returns the
        negative log-likelihood per token; without, log-probabilities over the
        full vocabulary.
        """
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    # the head softmax also scores the tail clusters themselves
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        """Computes log probabilities for all n_token classes."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
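

# How the pieces above combine: for a word w in tail cluster c, the full
# log-probability is log p(cluster c | h) -- one of the n_clusters logits
# appended to the head softmax -- plus log p(w | c, h) from that cluster's
# smaller softmax. A toy sketch of the composition (shapes are illustrative,
# not the module's API):
def _compose_adaptive_logprob(head_logprob, tail_logprob, cluster_pos, word_idx):
    # head_logprob: (shortlist_size + n_clusters,) log-probs from the head
    # tail_logprob: (cluster_size,) log-probs inside the chosen cluster
    return head_logprob[cluster_pos] + tail_logprob[word_idx]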
| 132 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase :Union[str, Any] = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :int = ['''LayoutLMv3TokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Tuple = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :List[Any] = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Dict = ['''LayoutLMv3FeatureExtractor''']
lowerCamelCase :Any = ['''LayoutLMv3ImageProcessor''']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 206 | """simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )
    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
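
        # Repos that bundle several components (e.g. a diffusers-style layout)
        # keep the image processor config under
        # feature_extractor/preprocessor_config.json, which is why the explicit
        # subfolder argument above is required.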
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor") | 135 | 0 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    # tokenizer has no padding token, so the common padding test does not apply
    def test_padding_different_model_input_name(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
@cached_property
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a ='''Hello World!'''
a =[126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(__A , self.big_tokenizer.encode(__A ) )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a =(
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
a =[
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(__A , self.big_tokenizer.encode(__A ) )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self ) -> str:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
a =list(self.big_tokenizer.get_vocab().keys() )[:10]
a =''' '''.join(__A )
a =self.big_tokenizer.encode_plus(__A , return_tensors='''pt''' )
a =self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
a =ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
a =encoded_sequence['''input_ids'''].shape
a =ReformerModel(__A )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__A )
model(**__A )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
# fmt: off
a ={'''input_ids''': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
a =[
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=__A , sequences=__A , ) | 215 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class __A ( _SCREAMING_SNAKE_CASE, unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = RoFormerTokenizer
__lowerCAmelCase = RoFormerTokenizerFast
__lowerCAmelCase = True
__lowerCAmelCase = True
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
super().setUp()
def SCREAMING_SNAKE_CASE ( self , **__A ) -> Optional[int]:
return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__A )
def SCREAMING_SNAKE_CASE ( self , **__A ) -> List[Any]:
return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__A )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
a ='''永和服装饰品有限公司,今天天气非常好'''
a ='''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =self.get_tokenizer()
a , a =self.get_chinese_input_output_texts()
a =tokenizer.tokenize(__A )
self.assertListEqual(__A , output_text.split() )
a =tokens + [tokenizer.unk_token]
a =[2_2943, 2_1332, 3_4431, 4_5904, 117, 306, 1231, 1231, 2653, 3_3994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a =self.get_rust_tokenizer()
a , a =self.get_chinese_input_output_texts()
a =tokenizer.tokenize(__A )
self.assertListEqual(__A , output_text.split() )
a =tokens + [tokenizer.unk_token]
a =[2_2943, 2_1332, 3_4431, 4_5904, 117, 306, 1231, 1231, 2653, 3_3994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
def SCREAMING_SNAKE_CASE ( self ) -> int:
pass | 215 | 1 |
'''simple docstring'''
from __future__ import annotations
import requests
valid_terms = set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def get_subreddit_data ( subreddit : str , limit : int = 1 , age : str = "new" , wanted_data : list | None = None ) -> dict:
    '''simple docstring'''
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data ) - valid_terms ) ):
        msg = f'Invalid search term: {invalid_search_terms}'
        raise ValueError(msg )
    response = requests.get(
        f'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={"User-agent": "A random string"} , )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit )}
    data_dict = {}
    for id_ in range(limit ):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
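# A minimal retry sketch on top of get_subreddit_data (hypothetical back-off
# values, not part of the original script): Reddit answers HTTP 429 when you
# are rate limited, so wait briefly and try the same request again.
import time

def fetch_with_retry(subreddit: str, retries: int = 3) -> dict:
    for attempt in range(retries):
        try:
            return get_subreddit_data(subreddit, wanted_data=["title", "url"])
        except requests.HTTPError:
            time.sleep(2 ** attempt)  # exponential back-off: 1s, 2s, 4s
    raise requests.HTTPError("still rate limited after retries")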
| 22 |
'''simple docstring'''
import string
from math import log10
def term_frequency ( term : str , document : str ) -> int:
    '''simple docstring'''
    document_without_punctuation = document.translate(
        str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" )
    tokenize_document = document_without_punctuation.split(" " ) # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency ( term : str , corpus : str ) -> tuple[int, int]:
    '''simple docstring'''
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency ( df : int , n : int , smoothing : bool = False ) -> float:
    '''simple docstring'''
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined." )
        return round(1 + log10(n / (1 + df) ) , 3 )
    if df == 0:
        raise ZeroDivisionError("df must be > 0" )
    elif n == 0:
        raise ValueError("log10(0) is undefined." )
    return round(log10(n / df ) , 3 )
def tf_idf ( tf : int , idf : int ) -> float:
    '''simple docstring'''
    return round(tf * idf , 3 )
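# A small worked example (hypothetical three-line corpus, chosen so the
# arithmetic is easy to follow):
if __name__ == "__main__":
    corpus = "this is the first document\nthis document is the second document\nand this is the third one"
    tf = term_frequency("document" , corpus.split("\n" )[1] )   # 2 occurrences in the second document
    df, n = document_frequency("document" , corpus )            # appears in 2 of the 3 documents
    print(tf_idf(tf , inverse_document_frequency(df , n ) ))    # 2 * round(log10(3 / 2), 3) = 0.352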
| 22 | 1 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__snake_case = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class _lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) )
snake_case : List[Any] = self.transformer_dir
shutil.copy(
os.path.join(UpperCamelCase__ , "src/transformers/models/bert/modeling_bert.py" ) , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , )
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
snake_case : Dict = "src/transformers"
shutil.rmtree(self.transformer_dir )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> List[str]:
'''simple docstring'''
snake_case : Union[str, Any] = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
snake_case : str = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
        snake_case : Union[str, Any] = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
snake_case : Any = black.format_str(UpperCamelCase__ , mode=UpperCamelCase__ )
snake_case : Optional[Any] = os.path.join(self.transformer_dir , "new_code.py" )
with open(UpperCamelCase__ , "w" , newline="\n" ) as f:
f.write(UpperCamelCase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(UpperCamelCase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=UpperCamelCase__ )
with open(UpperCamelCase__ , "r" ) as f:
self.assertTrue(f.read() , UpperCamelCase__ )
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[int] = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , UpperCamelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , UpperCamelCase__ ) , )
# Copy consistency with a really long name
snake_case : List[Any] = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , F'{long_class_name}LMPredictionHead' , re.sub("Bert" , UpperCamelCase__ , UpperCamelCase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , UpperCamelCase__ , overwrite_result=re.sub("Bert" , "TestModel" , UpperCamelCase__ ) , )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : int = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
snake_case : str = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
snake_case : Dict = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
snake_case : List[Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
snake_case ,snake_case : Union[str, Any] = check_copies.convert_to_localized_md(
UpperCamelCase__ , UpperCamelCase__ , localized_readme["format_model_list"] )
self.assertFalse(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case ,snake_case : int = check_copies.convert_to_localized_md(
UpperCamelCase__ , UpperCamelCase__ , localized_readme["format_model_list"] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(UpperCamelCase__ )
snake_case : int = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
snake_case : List[Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
snake_case : List[Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
snake_case ,snake_case : Any = check_copies.convert_to_localized_md(
UpperCamelCase__ , UpperCamelCase__ , localized_readme["format_model_list"] )
# Check if the model link is synchronized.
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
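# The rule the tests above exercise, shown standalone (illustrative strings,
# not real fixtures): a "# Copied from ... with Bert->TestModel" comment asks
# the checker to compare the class against its source after a plain re.sub.
assert re.sub("Bert" , "TestModel" , "class BertLMPredictionHead(nn.Module):" ) == "class TestModelLMPredictionHead(nn.Module):"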
| 112 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def __lowerCAmelCase ( lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
snake_case : Optional[Any] = list(s_dict.keys() )
for key in keys:
snake_case : Any = R".*/layers_(\d+)"
snake_case : Tuple = key
if re.match(lowercase , lowercase ):
snake_case : List[str] = re.sub(R"layers_(\d+)" , R"block/\1/layer" , lowercase )
snake_case : Union[str, Any] = R"(encoder|decoder)\/"
if re.match(lowercase , lowercase ):
snake_case : Any = re.match(lowercase , lowercase ).groups()
if groups[0] == "encoder":
snake_case : Union[str, Any] = re.sub(R"/mlp/" , R"/1/mlp/" , lowercase )
snake_case : int = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , lowercase )
elif groups[0] == "decoder":
snake_case : str = re.sub(R"/mlp/" , R"/2/mlp/" , lowercase )
snake_case : List[str] = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , lowercase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
snake_case : int = new_key.replace(lowercase , lowercase )
print(F'{key} -> {new_key}' )
snake_case : Optional[Any] = s_dict.pop(lowercase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
snake_case : int = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
snake_case : Optional[Any] = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts ):
                s_dict[key.replace("expert/" , F"experts/expert_{idx}/" )] = expert_weights[idx]
                print(F'{key} -> {key.replace("expert/" , F"experts/expert_{idx}/" )}' )
            s_dict.pop(key )
return s_dict
GIN_TO_CONFIG_MAPPING = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def __lowerCAmelCase ( lowercase : Dict , lowercase : Optional[Any] ) -> int:
"""simple docstring"""
import regex as re
with open(lowercase , "r" ) as f:
snake_case : List[str] = f.read()
snake_case : Tuple = re.findall(R"(.*) = ([0-9.]*)" , lowercase )
snake_case : Any = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
snake_case : Tuple = float(lowercase ) if "." in value else int(lowercase )
snake_case : List[str] = re.findall(R"(.*activations) = \(\'(.*)\',\)" , lowercase )[0]
snake_case : List[Any] = str(activation[1] )
snake_case : Optional[Any] = num_experts
snake_case : List[Any] = SwitchTransformersConfig(**lowercase )
return config
def __lowerCAmelCase ( lowercase : Tuple , lowercase : Tuple , lowercase : Union[str, Any]=None , lowercase : Any="./" , lowercase : int=8 ) -> Dict:
"""simple docstring"""
print(F'Loading flax weights from : {flax_checkpoint_path}' )
    snake_case : Union[str, Any] = checkpoints.load_t5x_checkpoint(lowercase )
if gin_file is not None:
snake_case : List[str] = convert_gin_to_config(lowercase , lowercase )
else:
snake_case : str = SwitchTransformersConfig.from_pretrained(lowercase )
snake_case : Any = SwitchTransformersForConditionalGeneration(lowercase )
snake_case : Optional[Any] = flax_params["target"]
snake_case : Optional[int] = flatten_dict(lowercase , sep="/" )
snake_case : Optional[Any] = rename_keys(lowercase )
snake_case : List[str] = unflatten_dict(lowercase , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowercase , lowercase )
print(F'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(lowercase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
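# A quick illustration of the key rewriting performed above (hypothetical
# checkpoint key, not taken from a real T5X file): the layers_(\d+) pattern
# fires first, then the plain substring mappings from MOE_LAYER_NAME_MAPPING.
_key = re.sub(R"layers_(\d+)" , R"block/\1/layer" , "encoder/layers_3/attention/query/kernel" )
for _old, _new in (("/attention/" , "/0/SelfAttention/") , ("query" , "q")):
    _key = _key.replace(_old , _new )
assert _key == "encoder/block/3/layer/0/SelfAttention/q/kernel"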
| 112 | 1 |
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler : Optional[logging.Handler] = None
log_levels = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def a__ ( ):
'''simple docstring'''
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY" , None )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
return _default_log_level
def a__ ( ):
'''simple docstring'''
return __name__.split("." )[0]
def a__ ( ):
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def a__ ( ):
'''simple docstring'''
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
lowerCAmelCase : int = logging.StreamHandler() # Set sys.stderr as stream.
lowerCAmelCase : Optional[int] = sys.stderr.flush
# Apply our default configuration to the library root logger.
lowerCAmelCase : Optional[Any] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
lowerCAmelCase : List[Any] = False
def a__ ( ):
'''simple docstring'''
global _default_handler
with _lock:
if not _default_handler:
return
lowerCAmelCase : Tuple = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
lowerCAmelCase : Optional[Any] = None
def a__ ( ):
'''simple docstring'''
return log_levels
def a__ ( SCREAMING_SNAKE_CASE : Optional[str] = None ):
'''simple docstring'''
if name is None:
lowerCAmelCase : List[str] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(SCREAMING_SNAKE_CASE )
def a__ ( ):
'''simple docstring'''
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_configure_library_root_logger()
_get_library_root_logger().setLevel(SCREAMING_SNAKE_CASE )
def a__ ( ):
'''simple docstring'''
return set_verbosity(SCREAMING_SNAKE_CASE )
def a__ ( ):
'''simple docstring'''
return set_verbosity(SCREAMING_SNAKE_CASE )
def a__ ( ):
'''simple docstring'''
return set_verbosity(SCREAMING_SNAKE_CASE )
def a__ ( ):
'''simple docstring'''
return set_verbosity(SCREAMING_SNAKE_CASE )
def a__ ( ):
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def a__ ( ):
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def a__ ( SCREAMING_SNAKE_CASE : logging.Handler ):
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : logging.Handler ):
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(SCREAMING_SNAKE_CASE )
def a__ ( ):
'''simple docstring'''
_configure_library_root_logger()
lowerCAmelCase : Optional[Any] = False
def a__ ( ):
'''simple docstring'''
_configure_library_root_logger()
lowerCAmelCase : List[Any] = True
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Tuple = _get_library_root_logger().handlers
for handler in handlers:
lowerCAmelCase : Optional[Any] = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
handler.setFormatter(SCREAMING_SNAKE_CASE )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Dict = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(SCREAMING_SNAKE_CASE )
def a__ ( self : int , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , SCREAMING_SNAKE_CASE )
if no_advisory_warnings:
return
self.warning(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowerCAmelCase__ = warning_advice
@functools.lru_cache(None )
def a__ ( self : str , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
self.warning(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowerCAmelCase__ = warning_once
class EmptyTqdm:
"""simple docstring"""
def __init__( self , *snake_case__ , **snake_case__ ): # pylint: disable=unused-argument
"""simple docstring"""
lowerCAmelCase : Dict = args[0] if args else None
def __iter__( self ):
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self , snake_case__ ):
"""simple docstring"""
def empty_fn(*snake_case__ , **snake_case__ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ):
"""simple docstring"""
return self
def __exit__( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
return
class _tqdm_cls:
"""simple docstring"""
def __call__( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm(*snake_case__ , **snake_case__ )
else:
return EmptyTqdm(*snake_case__ , **snake_case__ )
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*snake_case__ , **snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def a__ ( ):
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def a__ ( ):
'''simple docstring'''
global _tqdm_active
lowerCAmelCase : Optional[Any] = True
hf_hub_utils.enable_progress_bars()
def a__ ( ):
'''simple docstring'''
global _tqdm_active
lowerCAmelCase : Tuple = False
hf_hub_utils.disable_progress_bars()
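# A standalone sketch of the pattern this module implements (stdlib only,
# illustrative names): one root logger per library with a single stderr
# handler, and per-module child loggers that inherit its effective level.
import logging as _stdlib_logging

_root = _stdlib_logging.getLogger("mylib")
_root.addHandler(_stdlib_logging.StreamHandler())  # writes to stderr by default
_root.setLevel(_stdlib_logging.WARNING)

_child = _stdlib_logging.getLogger("mylib.module")
_child.warning("visible")   # propagates up to the root handler
_child.info("suppressed")   # below the inherited WARNING level, so dropped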
| 108 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences ( sequence : list[Any] ) -> None:
    create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree ( sequence : list[Any] , current_subsequence : list[Any] , index : int ) -> None:
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
    seq : list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)
    seq.clear()
    seq.extend(["""A""", """B""", """C"""])
    generate_all_subsequences(seq)
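# A non-recursive cross-check of the generator above (a sketch, not part of
# the original module): itertools.combinations yields the same subsequences,
# though in a different order than the depth-first recursion prints them.
from itertools import combinations

def all_subsequences(seq: list) -> list[tuple]:
    # every subset size from 0 to len(seq), preserving element order
    return [c for n in range(len(seq) + 1) for c in combinations(seq, n)]

assert sorted(all_subsequences([1, 2])) == [(), (1,), (1, 2), (2,)]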
| 50 | 0 |
'''simple docstring'''
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / '''model_card_template.md'''
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def a_ ( _UpperCAmelCase : Union[Dict, str, None] = None ) -> str:
__snake_case : List[str] = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'''; torch/{_torch_version}'''
if is_flax_available():
ua += f'''; jax/{_jax_version}'''
ua += f'''; flax/{_flax_version}'''
if is_onnx_available():
ua += f'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get('DIFFUSERS_IS_CI' ,'' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
ua += "; " + user_agent
return ua
def a_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Optional[str] = None ,_UpperCAmelCase : Optional[str] = None ) -> str:
if token is None:
__snake_case : Optional[int] = HfFolder.get_token()
if organization is None:
__snake_case : Union[str, Any] = whoami(_UpperCAmelCase )['name']
return f'''{username}/{model_id}'''
else:
return f'''{organization}/{model_id}'''
def a_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : List[Any] ) -> int:
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
if hasattr(_UpperCAmelCase ,'local_rank' ) and args.local_rank not in [-1, 0]:
return
__snake_case : str = args.hub_token if hasattr(_UpperCAmelCase ,'hub_token' ) else None
__snake_case : Optional[int] = get_full_repo_name(_UpperCAmelCase ,token=_UpperCAmelCase )
__snake_case : int = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='en' ,license='apache-2.0' ,library_name='diffusers' ,tags=[] ,datasets=args.dataset_name ,metrics=[] ,) ,template_path=_UpperCAmelCase ,model_name=_UpperCAmelCase ,repo_name=_UpperCAmelCase ,dataset_name=args.dataset_name if hasattr(_UpperCAmelCase ,'dataset_name' ) else None ,learning_rate=args.learning_rate ,train_batch_size=args.train_batch_size ,eval_batch_size=args.eval_batch_size ,gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(_UpperCAmelCase ,'gradient_accumulation_steps' ) else None
    ) ,adam_beta1=args.adam_beta1 if hasattr(_UpperCAmelCase ,'adam_beta1' ) else None ,adam_beta2=args.adam_beta2 if hasattr(_UpperCAmelCase ,'adam_beta2' ) else None ,adam_weight_decay=args.adam_weight_decay if hasattr(_UpperCAmelCase ,'adam_weight_decay' ) else None ,adam_epsilon=args.adam_epsilon if hasattr(_UpperCAmelCase ,'adam_epsilon' ) else None ,lr_scheduler=args.lr_scheduler if hasattr(_UpperCAmelCase ,'lr_scheduler' ) else None ,lr_warmup_steps=args.lr_warmup_steps if hasattr(_UpperCAmelCase ,'lr_warmup_steps' ) else None ,ema_inv_gamma=args.ema_inv_gamma if hasattr(_UpperCAmelCase ,'ema_inv_gamma' ) else None ,ema_power=args.ema_power if hasattr(_UpperCAmelCase ,'ema_power' ) else None ,ema_max_decay=args.ema_max_decay if hasattr(_UpperCAmelCase ,'ema_max_decay' ) else None ,mixed_precision=args.mixed_precision ,)
__snake_case : List[Any] = os.path.join(args.output_dir ,'README.md' )
model_card.save(_UpperCAmelCase )
def a_ ( _UpperCAmelCase : Optional[str] ,_UpperCAmelCase : Optional[str] = None ) -> Dict:
if resolved_file is None or commit_hash is not None:
return commit_hash
__snake_case : List[str] = str(Path(_UpperCAmelCase ).as_posix() )
__snake_case : List[Any] = re.search(r'snapshots/([^/]+)/' ,_UpperCAmelCase )
if search is None:
return None
__snake_case : str = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(_UpperCAmelCase ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
old_diffusers_cache = os.path.join(hf_cache_home, '''diffusers''')
def a_ ( _UpperCAmelCase : Optional[str] = None ,_UpperCAmelCase : Optional[str] = None ) -> None:
if new_cache_dir is None:
__snake_case : Tuple = DIFFUSERS_CACHE
if old_cache_dir is None:
__snake_case : str = old_diffusers_cache
__snake_case : str = Path(_UpperCAmelCase ).expanduser()
__snake_case : int = Path(_UpperCAmelCase ).expanduser()
for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
__snake_case : Optional[Any] = new_cache_dir / old_blob_path.relative_to(_UpperCAmelCase )
new_blob_path.parent.mkdir(parents=_UpperCAmelCase ,exist_ok=_UpperCAmelCase )
os.replace(_UpperCAmelCase ,_UpperCAmelCase )
try:
os.symlink(_UpperCAmelCase ,_UpperCAmelCase )
except OSError:
logger.warning(
'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
        trace = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'''the directory exists and can be written to.'''
)
def a_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Optional[str] = None ) -> str:
if variant is not None:
__snake_case : List[Any] = weights_name.split('.' )
__snake_case : Dict = splits[:-1] + [variant] + splits[-1:]
__snake_case : Optional[Any] = '.'.join(_UpperCAmelCase )
return weights_name
def a_ ( _UpperCAmelCase : Dict ,*,
_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : List[str]=None ,) -> Optional[Any]:
__snake_case : Optional[Any] = str(_UpperCAmelCase )
if os.path.isfile(_UpperCAmelCase ):
return pretrained_model_name_or_path
elif os.path.isdir(_UpperCAmelCase ):
if os.path.isfile(os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) ):
# Load from a PyTorch checkpoint
__snake_case : Tuple = os.path.join(_UpperCAmelCase ,_UpperCAmelCase )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) ):
__snake_case : List[Any] = os.path.join(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
return model_file
else:
raise EnvironmentError(
f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(_UpperCAmelCase ).base_version ) >= version.parse('0.20.0' )
):
try:
__snake_case : Optional[Any] = hf_hub_download(
_UpperCAmelCase ,filename=_add_variant(_UpperCAmelCase ,_UpperCAmelCase ) ,cache_dir=_UpperCAmelCase ,force_download=_UpperCAmelCase ,proxies=_UpperCAmelCase ,resume_download=_UpperCAmelCase ,local_files_only=_UpperCAmelCase ,use_auth_token=_UpperCAmelCase ,user_agent=_UpperCAmelCase ,subfolder=_UpperCAmelCase ,revision=revision or commit_hash ,)
warnings.warn(
f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' ,_UpperCAmelCase ,)
return model_file
except: # noqa: E722
warnings.warn(
f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(_UpperCAmelCase ,_UpperCAmelCase )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(_UpperCAmelCase ,_UpperCAmelCase )}\' so that the correct variant file can be added.''' ,_UpperCAmelCase ,)
try:
# 2. Load model file as usual
__snake_case : List[Any] = hf_hub_download(
_UpperCAmelCase ,filename=_UpperCAmelCase ,cache_dir=_UpperCAmelCase ,force_download=_UpperCAmelCase ,proxies=_UpperCAmelCase ,resume_download=_UpperCAmelCase ,local_files_only=_UpperCAmelCase ,use_auth_token=_UpperCAmelCase ,user_agent=_UpperCAmelCase ,subfolder=_UpperCAmelCase ,revision=revision or commit_hash ,)
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
'this model name. Check the model page at '
f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
f''' directory containing a file named {weights_name} or'''
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
f'''containing a file named {weights_name}''' )
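# A quick check of the variant naming scheme implemented above (the helper's
# def was name-mangled in this dump, so its body is restated here under an
# illustrative name):
def _add_variant_sketch(weights_name: str, variant: str) -> str:
    splits = weights_name.split("." )
    return ".".join(splits[:-1] + [variant] + splits[-1:] )

assert _add_variant_sketch("diffusion_pytorch_model.bin" , "fp16" ) == "diffusion_pytorch_model.fp16.bin"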
| 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class snake_case__ ( unittest.TestCase ):
def A_ ( self : int ) -> List[Any]:
'''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        image_processor_map = {
            'do_resize': True,
            'size': {'height': 18, 'width': 18},
            'do_normalize': True,
            'image_mean': [0.5, 0.5, 0.5],
            'image_std': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(image_processor_map , fp )
def A_ ( self : Optional[int] , **__a : Dict ) -> int:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__a )
def A_ ( self : int , **__a : Dict ) -> Tuple:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a )
def A_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A_ ( self : str ) -> List[str]:
'''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def A_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : Dict = self.get_image_processor()
__snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
processor.save_pretrained(self.tmpdirname )
__snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
__snake_case : Optional[Any] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__snake_case : Tuple = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__snake_case : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def A_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Tuple = self.get_image_processor()
__snake_case : int = self.get_tokenizer()
__snake_case : str = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : int = self.prepare_image_inputs()
__snake_case : List[str] = image_processor(__a , return_tensors='np' )
__snake_case : List[str] = processor(images=__a , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = self.get_image_processor()
__snake_case : int = self.get_tokenizer()
__snake_case : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : Optional[int] = 'lower newer'
__snake_case : Dict = processor(text=__a )
__snake_case : List[Any] = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Dict = self.get_image_processor()
__snake_case : Union[str, Any] = self.get_tokenizer()
__snake_case : int = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : List[Any] = 'lower newer'
__snake_case : Optional[Any] = self.prepare_image_inputs()
__snake_case : Union[str, Any] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(__a ):
processor()
def A_ ( self : Tuple ) -> Any:
'''simple docstring'''
__snake_case : Union[str, Any] = self.get_image_processor()
__snake_case : Any = self.get_tokenizer()
__snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case : int = processor.batch_decode(__a )
__snake_case : Optional[Any] = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def A_ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[str] = self.get_image_processor()
__snake_case : Dict = self.get_tokenizer()
__snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a )
__snake_case : Union[str, Any] = 'lower newer'
__snake_case : Tuple = self.prepare_image_inputs()
__snake_case : Union[str, Any] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
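# A minimal sketch of the merge behaviour verified above (illustrative dicts
# standing in for the tokenizer / image-processor outputs): the processor
# routes text to its tokenizer and images to its image processor, then
# merges the two output dicts.
_text_features = {"input_ids": [[1, 2]], "attention_mask": [[1, 1]]}
_image_features = {"pixel_values": [[[0.5]]]}
assert list({**_text_features, **_image_features} ) == ["input_ids", "attention_mask", "pixel_values"]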
| 0 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCamelCase = logging.get_logger(__name__)
class MobileViTImageProcessor ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
def __init__( self : str , _SCREAMING_SNAKE_CASE : Tuple = True , _SCREAMING_SNAKE_CASE : List[Any] = None , _SCREAMING_SNAKE_CASE : str = PILImageResampling.BILINEAR , _SCREAMING_SNAKE_CASE : int = True , _SCREAMING_SNAKE_CASE : Union[str, Any] = 1 / 255 , _SCREAMING_SNAKE_CASE : List[str] = True , _SCREAMING_SNAKE_CASE : str = None , _SCREAMING_SNAKE_CASE : Union[str, Any] = True , **_SCREAMING_SNAKE_CASE : int , )-> None:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[Any] = size if size is not None else {'''shortest_edge''': 224}
lowerCAmelCase__ : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[Any] = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256}
lowerCAmelCase__ : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' )
lowerCAmelCase__ : int = do_resize
lowerCAmelCase__ : str = size
lowerCAmelCase__ : str = resample
lowerCAmelCase__ : Dict = do_rescale
lowerCAmelCase__ : Dict = rescale_factor
lowerCAmelCase__ : List[Any] = do_center_crop
lowerCAmelCase__ : Optional[Any] = crop_size
lowerCAmelCase__ : Union[str, Any] = do_flip_channel_order
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] = PIL.Image.BILINEAR , _SCREAMING_SNAKE_CASE : Union[str, Any] = None , **_SCREAMING_SNAKE_CASE : List[str] , )-> np.ndarray:
lowerCAmelCase__ : List[str] = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
lowerCAmelCase__ : Any = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE_ )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase__( self : Tuple , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str] = None , **_SCREAMING_SNAKE_CASE : Tuple , )-> np.ndarray:
lowerCAmelCase__ : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[int] = None , **_SCREAMING_SNAKE_CASE : List[str] , )-> Any:
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase__( self : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str = None )-> np.ndarray:
return flip_channel_order(SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ )
    def preprocess(
        self,
        images,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 131 |
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedy activity selection: print a maximum set of non-overlapping activities."""
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
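# --- Editor's note: expected output for the inputs above (illustrative trace) ---
# The following activities are selected:
# 0,1,3,4,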
| 259 | 0 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps)  # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file", type=str, default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file", type=str, default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set", default=100, type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval", default=10, type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )
    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
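# --- Editor's sketch: how this script would typically be invoked (illustrative only) ---
# python run_clm_igf.py --data_dir ./data --model_name_or_path gpt2 --output_dir ./out
# Note that main() above defines CLI flags but never calls parser.parse_args(); the
# pipeline runs with the hard-coded literals shown, a quirk preserved from the source.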
| 354 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
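# --- Editor's note (illustrative) ---
# With the lazy module in place, `from transformers.models.mask2former import Mask2FormerConfig`
# resolves through _import_structure and only imports the heavy submodule on first access.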
| 333 | 0 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
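# --- Editor's usage sketch (illustrative; the checkpoint name is an assumption) ---
# pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
# image = pipe(num_inference_steps=50).images[0]
# image.save("ldm_sample.png")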
| 256 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")
        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")
        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
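# --- Editor's usage sketch (illustrative) ---
# import numpy as np
# extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
# inputs = extractor(np.zeros(24000, dtype=np.float32), sampling_rate=24000, return_tensors="np")
# inputs["input_values"][0].shape  # (1, 24000): a channel axis is added for mono audio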
| 256 | 1 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=["https://github.com/jhclark/tercom"],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 190 |
'''simple docstring'''
edges: dict[str, list[str]] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices: list[str] = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list[str], sort: list[str]) -> list[str]:
    """Depth-first topological sort over the directed acyclic graph defined above."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
| 190 | 1 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string to camelCase (or PascalCase if use_pascal is True)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
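# --- Editor's examples (illustrative) ---
# snake_to_camel_case("some_random_string")                   -> "someRandomString"
# snake_to_camel_case("some_random_string", use_pascal=True)  -> "SomeRandomString"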
| 92 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place with the cocktail shaker (bidirectional bubble) sort."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # backward pass: bubble the minimum toward the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # forward pass: bubble the maximum toward the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
| 204 | 0 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"
        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")
        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
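# --- Editor's sketch of the API under test (illustrative) ---
# cfg = GenerationConfig(do_sample=True, temperature=0.7)
# cfg.save_pretrained("./gen_cfg")
# reloaded = GenerationConfig.from_pretrained("./gen_cfg")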
| 360 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]
    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"""`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`."""
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub: bool = False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)
            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)
        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )
            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings."""
                )
            voice_preset_dict[key] = np.load(path)
        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")
            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)
        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )
        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset
        return encoded_text
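# --- Editor's usage sketch (illustrative; checkpoint and preset names are assumptions) ---
# processor = BarkProcessor.from_pretrained("suno/bark-small")
# inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")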
| 191 | 0 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image batch to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
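# --- Editor's usage sketch (illustrative) ---
# import numpy as np
# pil_list = numpy_to_pil(np.random.rand(2, 64, 64, 3))  # two 64x64 RGB PIL images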
| 147 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
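# --- Editor's usage sketch (illustrative; checkpoint taken from the pretrained map above) ---
# tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
# ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]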
| 271 | 0 |
import inspect
import unittest
from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV2ImageProcessor
class a ( UpperCAmelCase ):
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A_ , "tf_padding" ) )
self.parent.assertTrue(hasattr(A_ , "depth_multiplier" ) )
class a :
def __init__( self , A_ , A_=13 , A_=3 , A_=32 , A_=0.25 , A_=8 , A_=8 , A_=6 , A_=32 , A_=True , A_=True , A_=True , A_="relu6" , A_=1280 , A_=0.1 , A_=0.02 , A_=True , A_=True , A_=10 , A_=None , ):
'''simple docstring'''
_UpperCAmelCase : Any = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : int = image_size
_UpperCAmelCase : Union[str, Any] = depth_multiplier
_UpperCAmelCase : Optional[int] = depth_divisible_by
_UpperCAmelCase : Optional[Any] = min_depth
_UpperCAmelCase : List[Any] = expand_ratio
_UpperCAmelCase : List[Any] = tf_padding
_UpperCAmelCase : List[str] = output_stride
_UpperCAmelCase : List[Any] = first_layer_is_expansion
_UpperCAmelCase : Tuple = finegrained_output
_UpperCAmelCase : Any = hidden_act
_UpperCAmelCase : List[Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
_UpperCAmelCase : Optional[int] = classifier_dropout_prob
_UpperCAmelCase : List[Any] = use_labels
_UpperCAmelCase : Tuple = is_training
_UpperCAmelCase : List[str] = num_labels
_UpperCAmelCase : int = initializer_range
_UpperCAmelCase : Optional[Any] = scope
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Dict = None
if self.use_labels:
_UpperCAmelCase : str = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def _UpperCAmelCase ( self ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : int = MobileNetVaModel(config=A_ )
model.to(A_ )
model.eval()
_UpperCAmelCase : Optional[int] = model(A_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : Dict = self.num_labels
_UpperCAmelCase : Optional[Any] = MobileNetVaForImageClassification(A_ )
model.to(A_ )
model.eval()
_UpperCAmelCase : int = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.num_labels
_UpperCAmelCase : int = MobileNetVaForSemanticSegmentation(A_ )
model.to(A_ )
model.eval()
_UpperCAmelCase : Dict = model(A_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_UpperCAmelCase : List[str] = model(A_ , labels=A_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def _UpperCAmelCase ( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_lowercase = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_lowercase = (
{
"feature-extraction": MobileNetVaModel,
"image-classification": MobileNetVaForImageClassification,
"image-segmentation": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
_lowercase = False
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Tuple = MobileNetVaModelTester(self )
_UpperCAmelCase : int = MobileNetVaConfigTester(self , config_class=A_ , has_text_modality=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV2 does not use inputs_embeds" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV2 does not support input and output embeddings" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV2 does not output attentions" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Any = model_class(A_ )
_UpperCAmelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
_UpperCAmelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(A_ , A_ , A_ ):
_UpperCAmelCase : Any = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(A_ , A_ ) )
_UpperCAmelCase : str = outputs.hidden_states
_UpperCAmelCase : Union[str, Any] = 16
self.assertEqual(len(A_ ) , A_ )
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : str = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : Optional[int] = True
check_hidden_states_output(A_ , A_ , A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A_ )
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Union[str, Any] = MobileNetVaModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
_UpperCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224" ) if is_vision_available() else None
)
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224" ).to(A_ )
_UpperCAmelCase : List[str] = self.default_image_processor
_UpperCAmelCase : Dict = prepare_img()
_UpperCAmelCase : Optional[Any] = image_processor(images=A_ , return_tensors="pt" ).to(A_ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : Any = model(**A_ )
# verify the logits
_UpperCAmelCase : int = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , A_ )
_UpperCAmelCase : str = torch.tensor([0.24_45, -1.19_93, 0.19_05] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
_UpperCAmelCase : List[str] = model.to(A_ )
_UpperCAmelCase : int = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
_UpperCAmelCase : str = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(images=A_ , return_tensors="pt" ).to(A_ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : int = model(**A_ )
_UpperCAmelCase : Tuple = outputs.logits
# verify the logits
_UpperCAmelCase : str = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , A_ )
_UpperCAmelCase : List[str] = torch.tensor(
[
[[17.57_90, 17.75_81, 18.33_55], [18.32_57, 18.42_30, 18.89_73], [18.61_69, 18.86_50, 19.21_87]],
[[-2.15_95, -2.09_77, -2.37_41], [-2.42_26, -2.30_28, -2.68_35], [-2.78_19, -2.59_91, -2.77_06]],
[[4.20_58, 4.83_17, 4.76_38], [4.41_36, 5.03_61, 4.93_83], [4.50_28, 4.96_44, 4.87_34]],
] , device=A_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A_ , atol=1e-4 ) )
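# A hedged end-to-end sketch mirroring the image-classification integration test
# above, using the public checkpoint it references (the local test fixture path
# is an assumption):
from PIL import Image
import torch
from transformers import MobileNetV2ForImageClassification, MobileNetV2ImageProcessor

processor = MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1001): ImageNet classes + background
print(model.config.id2label[logits.argmax(-1).item()])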
| 354 |
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = 'aab'
    pattern = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'''{input_string} matches the given pattern {pattern}''')
else:
print(F'''{input_string} does not match with the given pattern {pattern}''')
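# A few extra hedged checks of the matcher above: '.' matches any single
# character and '*' matches zero or more repetitions of the preceding element.
assert match_pattern("aab", "c*a*b")    # "c*" matches empty, "a*" matches "aa"
assert match_pattern("abc", "a.c")      # "." matches "b"
assert match_pattern("", "x*")          # "x*" can match the empty string
assert not match_pattern("abc", "ab")   # no wildcard can absorb the extra "c"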
| 189 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict(checkpoint_path ):
    """simple docstring"""
    sd = torch.load(checkpoint_path , map_location="cpu" )
    return sd
def get_new_dict(d , config , rename_keys_prefix=rename_keys_prefix ):
    """simple docstring"""
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path , pytorch_dump_folder_path ):
    """simple docstring"""
    assert (
        checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 5_12}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 20_48}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 20_48}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 10_24}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`." )
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 5_12}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 20_48}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 20_48, "num_labels": 31_29}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 10_24,
                "num_labels": 2,
            }
            model_type = "nlvr"
    config = VisualBertConfig(**config_params )
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path )
    new_state_dict = get_new_dict(state_dict , config )
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config )
    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
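# A small hedged demo of the key-mapping step performed by get_new_dict above
# (the sample checkpoint keys are illustrative):
for key in ("bert.bert.encoder.layer.0.output.dense.weight", "bert.cls.predictions.bias"):
    new_key = key
    for old, new in rename_keys_prefix:
        new_key = new_key.replace(old, new)
    print(key, "->", new_key)
# bert.bert.encoder.layer.0.output.dense.weight -> visual_bert.encoder.layer.0.output.dense.weight
# bert.cls.predictions.bias -> cls.predictions.bias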
| 64 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token , num_runs=7 ):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url , headers=headers ).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token ):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token )
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names , output_dir , token ):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id , token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name , artifact_url=artifact_url , output_dir=output_dir , token=token )


def get_last_daily_ci_reports(artifact_names , output_dir , token ):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names , output_dir , token )
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir , f"{artifact_name}.zip" )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8" )
    return results
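# A hedged usage sketch: fetch the latest scheduled-CI reports. The artifact
# name and the GITHUB_TOKEN environment variable are assumptions:
import os

reports = get_last_daily_ci_reports(
    artifact_names=["run_all_tests_gpu_test_reports"],
    output_dir="./ci_reports",
    token=os.environ.get("GITHUB_TOKEN"),
)
for name, files in reports.items():
    print(name, sorted(files))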
| 64 | 1 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module ):
    """simple docstring"""
    # Turn off gradient tracking for every parameter of the module.
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """simple docstring"""
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = 'mps'
    if device == "mps":
        print(
            'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
            ' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
            ' with generations.' )
    return device


def show_pil(image ):
    """simple docstring"""
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()


def get_timestamp():
    """simple docstring"""
    current_time = datetime.now()
    timestamp = current_time.strftime('%H:%M:%S' )
    return timestamp
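# Hedged usage of the helpers above (the function names follow the
# de-obfuscated definitions in this file):
import torch.nn as nn

device = get_device()
layer = nn.Linear(4, 4).to(device)
freeze_module(layer)  # the layer's parameters no longer receive gradients
assert all(not p.requires_grad for p in layer.parameters())
print(f"[{get_timestamp()}] running on {device}")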
| 264 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str , path: str , revision: Optional[str] = None ) -> str:
    """simple docstring"""
    if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type='dataset' , revision=revision )
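# A hedged example (the dataset repo and file path are illustrative):
url = hf_hub_url("squad", "plain_text/train-00000-of-00001.parquet", revision="main")
print(url)
# e.g. https://huggingface.co/datasets/squad/resolve/main/plain_text/train-00000-of-00001.parquet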
| 264 | 1 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
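# The replacement the warning points to, as a hedged sketch (the checkpoint id
# is an assumption):
from diffusers import StableDiffusionImg2ImgPipeline

pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")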
| 234 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCamelCase__ = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
lowerCamelCase__ = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
lowerCamelCase__ = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def lowerCAmelCase__ ( self : int ) ->MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute( self : Dict , predictions : List[List[str]] , references : List[List[List[str]]] , min_len : int = 1 , max_len : int = 4 , ) ->Dict[str, float]:
        '''simple docstring'''
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
| 234 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_lowerCAmelCase : Any = logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE__ ):
def __init__( self: Optional[Any] ,*__lowerCAmelCase: Tuple ,**__lowerCAmelCase: str ):
'''simple docstring'''
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." ,__lowerCAmelCase ,)
        super().__init__(*__lowerCAmelCase ,**__lowerCAmelCase )
| 359 |
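# The replacement suggested by the deprecation warning in the file above, as a
# hedged sketch (the checkpoint id is an assumption):
from transformers import GLPNImageProcessor

processor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-kitti")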
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class A_ ( _a ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = None
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.9_9_9 , alpha_transform_type="cosine" , ) -> torch.Tensor:
    '''simple docstring'''
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t ):
            return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t ):
            return math.exp(t * -1_2.0 )

    else:
        raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class A_ ( _a , _a ):
@register_to_config
def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,):
'''simple docstring'''
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
_lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = 1.0 - self.betas
_lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 )
_lowerCamelCase : int = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
_lowerCamelCase : Tuple = 1.0
# setable values
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() )
_lowerCamelCase : List[str] = variance_type
def _lowercase ( self: Any ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ):
'''simple docstring'''
return sample
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, torch.device] = None ):
'''simple docstring'''
_lowerCamelCase : str = num_inference_steps
_lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
_lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
_lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: str=None ):
'''simple docstring'''
if prev_timestep is None:
_lowerCamelCase : List[str] = t - 1
_lowerCamelCase : Optional[int] = self.alphas_cumprod[t]
_lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_lowerCamelCase : Dict = 1 - alpha_prod_t
_lowerCamelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_lowerCamelCase : List[Any] = self.betas[t]
else:
_lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
_lowerCamelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
_lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) )
_lowerCamelCase : str = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
_lowerCamelCase : str = variance.log()
_lowerCamelCase : str = beta.log()
_lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2
_lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log
return variance
def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : str = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
_lowerCamelCase, _lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 )
else:
_lowerCamelCase : List[Any] = None
# 1. compute alphas, betas
if prev_timestep is None:
_lowerCamelCase : List[Any] = t - 1
_lowerCamelCase : Dict = self.alphas_cumprod[t]
_lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_lowerCamelCase : Dict = 1 - alpha_prod_t
_lowerCamelCase : List[str] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_lowerCamelCase : Any = self.betas[t]
_lowerCamelCase : str = self.alphas[t]
else:
_lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev
_lowerCamelCase : Optional[Any] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_lowerCamelCase : List[Any] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_lowerCamelCase : Any = torch.clamp(
__lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
_lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_lowerCamelCase : Union[str, Any] = 0
if t > 0:
_lowerCamelCase : Dict = randn_tensor(
model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device )
_lowerCamelCase : Any = self._get_variance(
__lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,)
if self.variance_type == "fixed_small_log":
_lowerCamelCase : Optional[Any] = variance
elif self.variance_type == "learned_range":
_lowerCamelCase : Optional[int] = (0.5 * variance).exp()
else:
raise ValueError(
F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
" for the UnCLIPScheduler." )
_lowerCamelCase : Dict = variance * variance_noise
_lowerCamelCase : List[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase )
    def _lowercase ( self: str ,original_samples: torch.FloatTensor ,noise: torch.FloatTensor ,timesteps: torch.IntTensor ,):
        '''simple docstring'''
        # Forward diffusion: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype )
        timesteps = timesteps.to(original_samples.device )
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1 )
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
| 340 | 0 |
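# A hedged check of the UnCLIP scheduler defined in the file above, via the
# public diffusers API (the argument values are illustrative):
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
scheduler.set_timesteps(25)
print(scheduler.timesteps[:5])                                # descending timesteps
print(scheduler.betas[0].item(), scheduler.betas[-1].item())  # cosine schedule, capped near 0.999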
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class A_ ( unittest.TestCase ):
@require_torch
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
__lowerCAmelCase: List[str] = pipeline(
task='zero-shot-audio-classification' , model='hf-internal-testing/tiny-clap-htsat-unfused' )
__lowerCAmelCase: Union[str, Any] = load_dataset('ashraq/esc50' )
__lowerCAmelCase: List[str] = dataset['train']['audio'][-1]['array']
__lowerCAmelCase: str = audio_classifier(UpperCAmelCase , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [{'score': 0.501, 'label': 'Sound of a dog'}, {'score': 0.499, 'label': 'Sound of vaccum cleaner'}] , )
@unittest.skip('No models are available in TF' )
def UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@slow
@require_torch
def UpperCAmelCase ( self : int ) -> int:
__lowerCAmelCase: Optional[int] = pipeline(
task='zero-shot-audio-classification' , model='laion/clap-htsat-unfused' , )
# This is an audio of a dog
__lowerCAmelCase: Dict = load_dataset('ashraq/esc50' )
__lowerCAmelCase: Union[str, Any] = dataset['train']['audio'][-1]['array']
__lowerCAmelCase: str = audio_classifier(UpperCAmelCase , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{'score': 0.999, 'label': 'Sound of a dog'},
{'score': 0.001, 'label': 'Sound of vaccum cleaner'},
] , )
__lowerCAmelCase: List[Any] = audio_classifier([audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{'score': 0.999, 'label': 'Sound of a dog'},
{'score': 0.001, 'label': 'Sound of vaccum cleaner'},
],
]
* 5 , )
__lowerCAmelCase: Optional[int] = audio_classifier(
[audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] , batch_size=5 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{'score': 0.999, 'label': 'Sound of a dog'},
{'score': 0.001, 'label': 'Sound of vaccum cleaner'},
],
]
* 5 , )
@unittest.skip('No models are available in TF' )
def UpperCAmelCase ( self : Optional[Any] ) -> str:
pass
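# A hedged standalone version of the slow integration test above:
from datasets import load_dataset
from transformers import pipeline

classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
audio = load_dataset("ashraq/esc50", split="train")["audio"][-1]["array"]
print(classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]))
# expected to strongly favour "Sound of a dog"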
| 322 |
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ = get_tests_dir('fixtures/test_sentencepiece.model')
lowercase__ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
lowercase__ = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = CamembertTokenizer
a__ = CamembertTokenizerFast
a__ = True
a__ = True
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
a__: Tuple = CamembertTokenizer(lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Optional[Any] = '<pad>'
a__: List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase) , lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase) , lowercase)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: str = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>NOTUSED')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , '<mask>')
self.assertEqual(len(lowercase) , 10_04)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_05)
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Optional[Any] = CamembertTokenizer(lowercase)
tokenizer.save_pretrained(self.tmpdirname)
a__: List[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
a__: Dict = 'I was born in 92000, and this is falsé.'
a__: Optional[int] = tokenizer.encode(lowercase)
a__: Any = rust_tokenizer.encode(lowercase)
self.assertListEqual(lowercase , lowercase)
a__: Optional[Any] = tokenizer.encode(lowercase , add_special_tokens=lowercase)
a__: str = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase)
self.assertListEqual(lowercase , lowercase)
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
a__: Tuple = tokenizer.convert_ids_to_tokens(lowercase)
a__: Tuple = rust_tokenizer.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a__: Dict = self.get_tokenizer()
a__: str = self.get_rust_tokenizer()
a__: int = 'I was born in 92000, and this is falsé.'
a__: Optional[Any] = tokenizer.tokenize(lowercase)
a__: List[Any] = rust_tokenizer.tokenize(lowercase)
self.assertListEqual(lowercase , lowercase)
a__: str = tokenizer.encode(lowercase , add_special_tokens=lowercase)
a__: str = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase)
self.assertListEqual(lowercase , lowercase)
a__: Tuple = self.get_rust_tokenizer()
a__: Union[str, Any] = tokenizer.encode(lowercase)
a__: List[Any] = rust_tokenizer.encode(lowercase)
self.assertListEqual(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: Union[str, Any] = {'input_ids': [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
a__: int = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=lowercase , )
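# A hedged standalone version of the slow/fast parity check exercised above,
# using the public camembert-base checkpoint instead of the test fixture:
from transformers import CamembertTokenizer, CamembertTokenizerFast

slow_tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
fast_tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
text = "I was born in 92000, and this is falsé."
print(slow_tokenizer.encode(text))
print(fast_tokenizer.encode(text))  # expected to match the slow tokenizer's ids here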
| 290 | 0 |
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class a ( unittest.TestCase ):
def __init__( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = parent
def _UpperCAmelCase ( self ):
'''simple docstring'''
return {}
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
_UpperCAmelCase : Union[str, Any] = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"
_UpperCAmelCase : Dict = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "
return [html_string_a, html_string_a]
@require_bsa
class a ( UpperCAmelCase , unittest.TestCase ):
_lowercase = MarkupLMFeatureExtractor if is_bsa_available() else None
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : str = MarkupLMFeatureExtractionTester(self )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return self.feature_extract_tester.prepare_feat_extract_dict()
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Any = self.feature_extraction_class()
# Test not batched input
_UpperCAmelCase : Optional[Any] = get_html_strings()[0]
_UpperCAmelCase : Union[str, Any] = feature_extractor(A_ )
# fmt: off
_UpperCAmelCase : Dict = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
_UpperCAmelCase : Tuple = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
# fmt: on
self.assertEqual(encoding.nodes , A_ )
self.assertEqual(encoding.xpaths , A_ )
# Test batched
_UpperCAmelCase : str = get_html_strings()
_UpperCAmelCase : Any = feature_extractor(A_ )
# fmt: off
_UpperCAmelCase : str = expected_nodes + [["My First Heading", "My first paragraph."]]
_UpperCAmelCase : Optional[Any] = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , A_ )
self.assertEqual(encoding.xpaths , A_ )
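# A hedged standalone run of the feature extractor exercised above (requires
# beautifulsoup4, mirroring the require_bs4 guard):
from transformers import MarkupLMFeatureExtractor

html = "<html><body><h1>My First Heading</h1><p>My first paragraph.</p></body></html>"
feature_extractor = MarkupLMFeatureExtractor()
encoding = feature_extractor(html)
print(encoding.nodes)   # [['My First Heading', 'My first paragraph.']]
print(encoding.xpaths)  # [['/html/body/h1', '/html/body/p']]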
| 189 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight') )
rename_keys.append((F'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias', F'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name , pytorch_dump_folder_path , push_to_hub=False ):
_UpperCAmelCase : List[Any] = BitConfig(
global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=lowerCAmelCase , )
_UpperCAmelCase : Optional[Any] = ViTHybridConfig(backbone_config=lowerCAmelCase , image_size=384 , num_labels=1000 )
_UpperCAmelCase : str = False
# load original model from timm
_UpperCAmelCase : Optional[Any] = timm.create_model(lowerCAmelCase , pretrained=lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase : str = timm_model.state_dict()
if base_model:
remove_classification_head_(lowerCAmelCase )
_UpperCAmelCase : str = create_rename_keys(lowerCAmelCase , lowerCAmelCase )
for src, dest in rename_keys:
rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
read_in_q_k_v(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
_UpperCAmelCase : str = "huggingface/label-files"
_UpperCAmelCase : Tuple = "imagenet-1k-id2label.json"
_UpperCAmelCase : Union[str, Any] = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase : Any = {int(lowerCAmelCase ): v for k, v in idalabel.items()}
_UpperCAmelCase : Dict = idalabel
_UpperCAmelCase : Dict = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_UpperCAmelCase : Union[str, Any] = ViTHybridModel(lowerCAmelCase ).eval()
else:
_UpperCAmelCase : Optional[Any] = ViTHybridForImageClassification(lowerCAmelCase ).eval()
model.load_state_dict(lowerCAmelCase )
# create image processor
_UpperCAmelCase : Any = create_transform(**resolve_data_config({} , model=lowerCAmelCase ) )
_UpperCAmelCase : Tuple = transform.transforms
_UpperCAmelCase : Tuple = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
_UpperCAmelCase : Any = ViTHybridImageProcessor(
do_resize=lowerCAmelCase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCAmelCase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=lowerCAmelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
_UpperCAmelCase : List[str] = prepare_img()
_UpperCAmelCase : List[Any] = transform(lowerCAmelCase ).unsqueeze(0 )
_UpperCAmelCase : Optional[Any] = processor(lowerCAmelCase , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(lowerCAmelCase , lowerCAmelCase )
# verify logits
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = outputs.logits
print("Predicted class:" , logits.argmax(-1 ).item() )
if base_model:
_UpperCAmelCase : List[Any] = timm_model.forward_features(lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowerCAmelCase , outputs.pooler_output , atol=1E-3 )
else:
_UpperCAmelCase : Any = timm_model(lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase , outputs.logits , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCAmelCase )
if push_to_hub:
print(F'Pushing model and processor to the hub {vit_name}' )
model.push_to_hub(F'ybelkada/{vit_name}' )
processor.push_to_hub(F'ybelkada/{vit_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
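# A hedged sketch of using the published hybrid checkpoint this script targets
# (google/vit-hybrid-base-bit-384 is the public hub id for the default model):
import requests
import torch
from PIL import Image
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor

processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])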
| 189 | 1 |