| code (string, length 82 to 53.2k) | code_codestyle (int64, 0 to 721) | style_context (string, length 91 to 41.9k) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 to 1) |
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
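# Illustrative note (a sketch, assuming the usual `transformers` package layout; not
# part of the original file): with the _LazyModule pattern above, heavy submodules
# are only imported on first attribute access, e.g.:
#
#   from transformers.models.git import GitConfig   # loads configuration_git only
#   from transformers.models.git import GitModel    # loads modeling_git (needs torch)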
| 382 | import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but it was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)

    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
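# Illustrative CLI sketch (not part of the original script; the script file name and
# output directory below are hypothetical, but the checkpoint name is one of the
# accepted ones listed above):
#
#   python convert_visual_bert_checkpoint.py vqa_pre_trained.th ./visualbert_vqa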
| 382 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
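# Minimal usage sketch (illustrative, assuming only the class above): instantiating
# the config with defaults and overriding one MRA-specific field.
#
#   config = MraConfig(block_per_row=2)
#   assert config.model_type == "mra" and config.hidden_size == 768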
| 708 |
"""simple docstring"""
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    """
    Return the least value of M such that the number of cuboids up to M x M x M
    with an integer shortest surface path exceeds `limit`.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
| 74 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform):
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: bool = True,
        normalize_vars: bool = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        sampling_rate=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
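# Illustrative usage sketch (assumes torchaudio is installed; the waveform here is
# synthetic random noise, not real speech):
#
#   extractor = Speech2TextFeatureExtractor()               # 80 mel bins, 16 kHz defaults
#   waveform = np.random.randn(16_000).astype(np.float32)   # one second of mono audio
#   inputs = extractor(waveform, sampling_rate=16_000, padding=True, return_tensors="pt")
#   # inputs["input_features"] has shape (batch, frames, num_mel_bins)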
| 88 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
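# Quick sanity sketch (illustrative): the per-stage lists must stay the same length,
# since each index describes one stage of the network.
#
#   config = VanConfig()
#   assert len(config.patch_sizes) == len(config.strides) == len(config.depths) == 4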
| 655 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
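# Quick check of the rounding behaviour (illustrative): with scale_factor=8 a block
# covers 8**2 = 64 pixels, and any remainder rounds the block count up.
#
#   get_new_h_w(512, 512)  # -> (64, 64): 512 // 64 = 8 blocks, times scale_factor 8
#   get_new_h_w(520, 520)  # -> (72, 72): the 8-pixel remainder bumps the count to 9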
class KandinskyPipeline(DiffusionPipeline):
    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )

        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 524 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart'] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mbart_fast'] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mbart'] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mbart'] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mbart'] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 524 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
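# Illustrative usage sketch (the image file name is hypothetical; assumes PIL and a
# torch backend are installed). Calling the tool runs encode -> forward -> decode.
#
#   from PIL import Image
#   tool = ImageQuestionAnsweringTool()
#   answer = tool(Image.open("photo.jpg"), "What color is the car?")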
| 661 | """simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : Any ) -> Optional[Any]:
__snake_case = 2 # number of steps of autoregressive prediction we will perform
__snake_case = 10 # defined by the RL environment, may be normalized
__snake_case = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
__snake_case = model.to(A_ )
__snake_case = model.config
torch.manual_seed(0 )
__snake_case = torch.randn(1 , 1 , config.state_dim ).to(device=A_ , dtype=torch.floataa ) # env.reset()
__snake_case = torch.tensor(
[[0.24_27_93, -0.28_69_30_74, 0.8_74_26_13], [0.67_81_52_74, -0.08_10_10_85, -0.12_95_21_47]] , device=A_ )
__snake_case = torch.tensor(A_ , device=A_ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
__snake_case = state
__snake_case = torch.zeros(1 , 0 , config.act_dim , device=A_ , dtype=torch.floataa )
__snake_case = torch.zeros(1 , 0 , device=A_ , dtype=torch.floataa )
__snake_case = torch.tensor(0 , device=A_ , dtype=torch.long ).reshape(1 , 1 )
for step in range(A_ ):
__snake_case = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=A_ )] , dim=1 )
__snake_case = torch.cat([rewards, torch.zeros(1 , 1 , device=A_ )] , dim=1 )
__snake_case = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
__snake_case , __snake_case , __snake_case = model(
states=A_ , actions=A_ , rewards=A_ , returns_to_go=A_ , timesteps=A_ , attention_mask=A_ , return_dict=A_ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
__snake_case , __snake_case , __snake_case , __snake_case = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=A_ , dtype=torch.floataa ),
1.0,
False,
{},
)
__snake_case = action_pred[0, -1]
__snake_case = torch.cat([states, state] , dim=1 )
__snake_case = returns_to_go[0, -1] - reward
__snake_case = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
__snake_case = torch.cat(
[timesteps, torch.ones((1, 1) , device=A_ , dtype=torch.long ) * (step + 1)] , dim=1 ) | 564 | 0 |
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit Tensor"""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
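# Numerical sanity check (illustrative, not part of the original module): for uniform
# logits over n classes, log(sum(exp(x))) - sum(x * exp(x)) / sum(exp(x)) reduces to log(n).
#
#   entropy(torch.zeros(1, 4))  # -> tensor([1.3863]), i.e. log(4)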
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
def snake_case_ (self , __a , __a=None , __a=None , __a=None , __a=None , ) -> int:
UpperCamelCase = ()
UpperCamelCase = ()
UpperCamelCase = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
UpperCamelCase = all_hidden_states + (hidden_states,)
UpperCamelCase = layer_module(
__a , __a , head_mask[i] , __a , __a )
UpperCamelCase = layer_outputs[0]
if self.output_attentions:
UpperCamelCase = all_attentions + (layer_outputs[1],)
UpperCamelCase = (hidden_states,)
if self.output_hidden_states:
UpperCamelCase = current_outputs + (all_hidden_states,)
if self.output_attentions:
UpperCamelCase = current_outputs + (all_attentions,)
UpperCamelCase = self.highway[i](__a )
# logits, pooled_output
if not self.training:
UpperCamelCase = highway_exit[0]
UpperCamelCase = entropy(__a )
UpperCamelCase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
UpperCamelCase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
UpperCamelCase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(__a , i + 1 )
else:
UpperCamelCase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
UpperCamelCase = all_hidden_states + (hidden_states,)
UpperCamelCase = (hidden_states,)
if self.output_hidden_states:
UpperCamelCase = outputs + (all_hidden_states,)
if self.output_attentions:
UpperCamelCase = outputs + (all_attentions,)
UpperCamelCase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model.
        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def snake_case_ (self , __a=None , __a=None , __a=None , __a=None , __a=None , __a=None , __a=None , __a=None , ) -> List[Any]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
UpperCamelCase = input_ids.size()
elif inputs_embeds is not None:
UpperCamelCase = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
UpperCamelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
UpperCamelCase = torch.ones(__a , device=__a )
if encoder_attention_mask is None:
UpperCamelCase = torch.ones(__a , device=__a )
if token_type_ids is None:
UpperCamelCase = torch.zeros(__a , dtype=torch.long , device=__a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
UpperCamelCase = self.get_extended_attention_mask(__a , __a , __a )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
UpperCamelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
UpperCamelCase = encoder_attention_mask[:, None, None, :]
UpperCamelCase = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
UpperCamelCase = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
UpperCamelCase = self.get_head_mask(__a , self.config.num_hidden_layers )
UpperCamelCase = self.embeddings(
input_ids=__a , position_ids=__a , token_type_ids=__a , inputs_embeds=__a )
UpperCamelCase = self.encoder(
__a , attention_mask=__a , head_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , )
UpperCamelCase = encoder_outputs[0]
UpperCamelCase = self.pooler(__a )
UpperCamelCase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A module to provide an early-exit "highway" from the output of a non-final BertLayer to classification."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def snake_case_ (self , __a=None , __a=None , __a=None , __a=None , __a=None , __a=None , __a=None , __a=-1 , __a=False , ) -> str:
UpperCamelCase = self.num_layers
try:
UpperCamelCase = self.bert(
__a , attention_mask=__a , token_type_ids=__a , position_ids=__a , head_mask=__a , inputs_embeds=__a , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
UpperCamelCase = outputs[1]
UpperCamelCase = self.dropout(__a )
UpperCamelCase = self.classifier(__a )
UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
UpperCamelCase = e.message
UpperCamelCase = e.exit_layer
UpperCamelCase = outputs[0]
if not self.training:
UpperCamelCase = entropy(__a )
UpperCamelCase = []
UpperCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
UpperCamelCase = MSELoss()
UpperCamelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
UpperCamelCase = []
for highway_exit in outputs[-1]:
UpperCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(__a )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
UpperCamelCase = MSELoss()
UpperCamelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__a )
if train_highway:
UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
UpperCamelCase = (loss,) + outputs
if not self.training:
UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
UpperCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 544 |
"""simple docstring"""
import string
def decrypt(message: str) -> None:
    """
    Brute-force every possible Caesar shift and print each candidate plaintext.
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
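# Example run (illustrative): brute-forcing the ciphertext "KHOOR ZRUOG" prints all
# 26 candidates; the line for key #3 reads "HELLO WORLD".
#
#   decrypt("KHOOR ZRUOG")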
| 544 | 1 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16_000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
def __UpperCamelCase ( self : str ) -> Dict:
A = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = AudioLDMPipeline(**__UpperCamelCase )
A = audioldm_pipe.to(__UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs(__UpperCamelCase )
A = audioldm_pipe(**__UpperCamelCase )
A = output.audios[0]
assert audio.ndim == 1
assert len(__UpperCamelCase ) == 256
A = audio[:10]
A = np.array(
[-0.0_0_5_0, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_3, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_3] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
A = self.get_dummy_components()
A = AudioLDMPipeline(**__UpperCamelCase )
A = audioldm_pipe.to(__UpperCamelCase )
A = audioldm_pipe.to(__UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs(__UpperCamelCase )
A = 3 * [inputs['''prompt''']]
# forward
A = audioldm_pipe(**__UpperCamelCase )
A = output.audios[0]
A = self.get_dummy_inputs(__UpperCamelCase )
A = 3 * [inputs.pop('prompt' )]
A = audioldm_pipe.tokenizer(
__UpperCamelCase , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__UpperCamelCase , return_tensors='pt' , )
A = text_inputs['''input_ids'''].to(__UpperCamelCase )
A = audioldm_pipe.text_encoder(
__UpperCamelCase , )
A = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
A = F.normalize(__UpperCamelCase , dim=-1 )
A = prompt_embeds
# forward
A = audioldm_pipe(**__UpperCamelCase )
A = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
A = self.get_dummy_components()
A = AudioLDMPipeline(**__UpperCamelCase )
A = audioldm_pipe.to(__UpperCamelCase )
A = audioldm_pipe.to(__UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs(__UpperCamelCase )
A = 3 * ['''this is a negative prompt''']
A = negative_prompt
A = 3 * [inputs['''prompt''']]
# forward
A = audioldm_pipe(**__UpperCamelCase )
A = output.audios[0]
A = self.get_dummy_inputs(__UpperCamelCase )
A = 3 * [inputs.pop('prompt' )]
A = []
for p in [prompt, negative_prompt]:
A = audioldm_pipe.tokenizer(
__UpperCamelCase , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__UpperCamelCase , return_tensors='pt' , )
A = text_inputs['''input_ids'''].to(__UpperCamelCase )
A = audioldm_pipe.text_encoder(
__UpperCamelCase , )
A = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
A = F.normalize(__UpperCamelCase , dim=-1 )
embeds.append(__UpperCamelCase )
A = embeds
# forward
A = audioldm_pipe(**__UpperCamelCase )
A = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
A = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = PNDMScheduler(skip_prk_steps=__UpperCamelCase )
A = AudioLDMPipeline(**__UpperCamelCase )
A = audioldm_pipe.to(__UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = self.get_dummy_inputs(__UpperCamelCase )
A = '''egg cracking'''
A = audioldm_pipe(**__UpperCamelCase , negative_prompt=__UpperCamelCase )
A = output.audios[0]
assert audio.ndim == 1
assert len(__UpperCamelCase ) == 256
A = audio[:10]
A = np.array(
[-0.0_0_5_1, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_4, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_2] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
A = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = PNDMScheduler(skip_prk_steps=__UpperCamelCase )
A = AudioLDMPipeline(**__UpperCamelCase )
A = audioldm_pipe.to(__UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = '''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
A = audioldm_pipe(__UpperCamelCase , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
A = 2
A = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
A = 2
A = audioldm_pipe(__UpperCamelCase , num_inference_steps=2 , num_waveforms_per_prompt=__UpperCamelCase ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
A = 2
A = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__UpperCamelCase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2 | 106 |
from __future__ import annotations


def minimum_cost_path(matrix: list[list[int]]) -> int:
    """
    Find the minimum-cost path from the top-left to the bottom-right of a grid,
    moving only right or down. The input matrix is used as the DP table and is
    mutated in place.

    >>> minimum_cost_path([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
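# Worked example (added for illustration): in the doctest grid above the cheapest
# route is 1 -> 3 -> 1 -> 1 -> 1 (right, right, down, down), which sums to 7;
# every other monotone path through the grid costs at least 8.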
| 94 | 0 |
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """
    Return a dict of current worldwide COVID-19 statistics scraped from worldometers.
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 700 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    """
    Pipeline for unconditional image generation with the DDIM scheduler.
    """

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin the loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
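# Minimal usage sketch (added for illustration; assumes a DDPM-style checkpoint such
# as "google/ddpm-cifar10-32" that exposes a compatible `unet` and `scheduler`):
#
#     from diffusers import DDIMPipeline
#
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#     image.save("sample.png")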
| 451 | 0 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 697 |
"""Find the nth prime number (Project Euler problem 7)."""
import math


def is_prime(number: int) -> bool:
    """Check whether ``number`` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the ``nth`` prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
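# Sanity check (added for illustration): the first six primes are 2, 3, 5, 7, 11, 13,
# so solution(6) == 13; the default solution() returns the 10001st prime, 104743.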
if __name__ == "__main__":
print(f'''{solution() = }''')
| 697 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE__ ( _a ):
_a = 'ClapFeatureExtractor'
_a = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
super().__init__(lowerCAmelCase , lowerCAmelCase )
def __call__( self : str , lowerCAmelCase : int=None , lowerCAmelCase : str=None , lowerCAmelCase : Tuple=None , **lowerCAmelCase : Tuple ):
lowerCAmelCase = kwargs.pop("""sampling_rate""" , lowerCAmelCase )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
lowerCAmelCase = self.tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if audios is not None:
lowerCAmelCase = self.feature_extractor(
lowerCAmelCase , sampling_rate=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if text is not None and audios is not None:
lowerCAmelCase = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase ) , tensor_type=lowerCAmelCase )
def __lowercase ( self : Union[str, Any] , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Optional[Any] ):
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def __lowercase ( self : Tuple , *lowerCAmelCase : str , **lowerCAmelCase : str ):
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
@property
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase = self.tokenizer.model_input_names
lowerCAmelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
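# Minimal usage sketch (added for illustration; the checkpoint name is one common
# CLAP choice and `audio_array` is a placeholder 1-D numpy waveform):
#
#     from transformers import ClapProcessor
#
#     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#     inputs = processor(text=["a dog barking"], audios=[audio_array],
#                        sampling_rate=48_000, return_tensors="pt")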
| 719 |
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
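# Minimal usage sketch (added for illustration):
#
#     from transformers import AutoformerConfig, AutoformerModel
#
#     config = AutoformerConfig(prediction_length=24)
#     model = AutoformerModel(config)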
| 529 | 0 |
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """
    Create a state-space tree and iterate through each branch with DFS, pruning
    branches whose partial sum already exceeds ``max_sum`` or can no longer reach it.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
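# Expected output for the inputs above (added for illustration): the subsets of
# [3, 34, 4, 12, 5, 2] that sum to 9 are found in DFS order, printing:
#
#     [3, 4, 2] [4, 5]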
| 158 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper class for Multi-ControlNet.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
| 227 | 0 |
"""simple docstring"""
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def a__ ( _SCREAMING_SNAKE_CASE = 5_000 ):
"""simple docstring"""
UpperCamelCase = [(i * (3 * i - 1)) // 2 for i in range(1 , _SCREAMING_SNAKE_CASE )]
for i, pentagonal_i in enumerate(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ):
UpperCamelCase = pentagonal_nums[j]
UpperCamelCase = pentagonal_i + pentagonal_j
UpperCamelCase = pentagonal_j - pentagonal_i
if is_pentagonal(_SCREAMING_SNAKE_CASE ) and is_pentagonal(_SCREAMING_SNAKE_CASE ):
return b
return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
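# For the default limit this returns 5482660, the accepted Project Euler 44 answer;
# the -1 fallback is only reached if `limit` is too small to contain the pair.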
| 713 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _lowerCamelCase :
pass
| 544 | 0 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """
    Base class for the output of a scheduler's step function.
    """

    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    """
    Mixin containing common functions for the schedulers.
    """

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    """Create a beta schedule from the cumulative alpha_bar function (cosine schedule)."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
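# Sketch of the relation these helpers implement (standard DDPM forward noising,
# added here for illustration):
#
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,   eps ~ N(0, I)
#
# `add_noise_common` computes exactly this, with the two square-root terms
# broadcast from shape (batch,) up to the sample shape by
# `broadcast_to_shape_from_left`.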
| 97 |
"""
Project Euler Problem 38: find the largest 1-9 pandigital 9-digit number formed
as the concatenated product of an integer with (1, 2, ..., n) where n > 1.
"""
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    """Return True if ``n`` uses each of the digits 1-9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 320 | 0 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """
    Zero shot image classification pipeline using `CLIPModel`. This pipeline predicts the class of
    an image when you provide an image and a set of `candidate_labels`.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
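# Minimal usage sketch (added for illustration; the checkpoint name is one common
# choice, not the only option):
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     preds = classifier("cat.png", candidate_labels=["cat", "dog", "car"])
#     # preds is a list of {"score": ..., "label": ...} dicts sorted by descending score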
| 721 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True
                )
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
| 250 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('''path''' ,['''paws''', '''csv'''] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' ,['''accuracy'''] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'''path, expected''' ,[
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] ,)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' ,[
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] ,)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 683 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Rel position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[
                    f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"
                ] = t5x_relpos_bias_lookup(old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # make a copy of the converted parameters as torch tensors
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
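# Example invocation (added for illustration; all paths are placeholders):
#
#     python convert_t5x_checkpoint_to_pytorch.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/output \
#         --scalable_attention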
| 683 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)
    def encode(self, document, question):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences
    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 546 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCamelCase :
"""simple docstring"""
def __init__( self , a__ , a__=13 , a__=30 , a__=2 , a__=3 , a__=True , a__=True , a__=32 , a__=2 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=10 , a__=0.02 , a__=3 , a__=0.6 , a__=None , ) -> Union[str, Any]:
A = parent
A = batch_size
A = image_size
A = patch_size
A = num_channels
A = is_training
A = use_labels
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = type_sequence_label_size
A = initializer_range
A = mask_ratio
A = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
A = (image_size // patch_size) ** 2
A = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Any:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def _UpperCAmelCase ( self , a__ , a__ , a__ ) -> Optional[int]:
A = TFViTMAEModel(config=a__ )
A = model(a__ , training=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a__ , a__ , a__ ) -> int:
A = TFViTMAEForPreTraining(a__ )
A = model(a__ , training=a__ )
# expected sequence length = num_patches
A = (self.image_size // self.patch_size) ** 2
A = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
A = 1
A = TFViTMAEForPreTraining(a__ )
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(a__ , training=a__ )
A = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class _UpperCamelCase ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowerCAmelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def _UpperCAmelCase ( self ) -> Dict:
A = TFViTMAEModelTester(self )
A = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def _UpperCAmelCase ( self ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> int:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , tf.keras.layers.Layer ) )
def _UpperCAmelCase ( self ) -> Any:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def _UpperCAmelCase ( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def _UpperCAmelCase ( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def _UpperCAmelCase ( self ) -> int:
# make the mask reproducible
np.random.seed(2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)
            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)
def _UpperCAmelCase ( self ) -> Optional[int]:
# make the mask reproducible
np.random.seed(2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)
            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)
    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise
        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
def _UpperCAmelCase ( self ) -> Tuple:
# make mask reproducible
np.random.seed(2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})
        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)
            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }
            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)
            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class})
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
@slow
def _UpperCAmelCase ( self ) -> List[str]:
# make mask reproducible
np.random.seed(2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(inputs, noise=noise)
            if model_class.__name__ == "TFViTMAEModel":
                out_1 = outputs.last_hidden_state.numpy()
                out_1[np.isnan(out_1)] = 0  # zero out NaNs so the comparison below is well defined
            else:
                out_1 = outputs.logits.numpy()
                out_1[np.isnan(out_1)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(inputs, noise=noise)
                if model_class.__name__ == "TFViTMAEModel":
                    out_2 = after_outputs["last_hidden_state"].numpy()
                    out_2[np.isnan(out_2)] = 0
                else:
                    out_2 = after_outputs["logits"].numpy()
                    out_2[np.isnan(out_2)] = 0
                # compare the pre-save and post-reload outputs
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
def _UpperCAmelCase ( self ) -> Dict:
# make mask reproducible
np.random.seed(2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(inputs, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            new_model = model_class.from_config(model.config)
            _ = new_model(inputs)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(inputs, noise=noise)
            self.assert_outputs_same(after_outputs, outputs)
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@slow
def _UpperCAmelCase ( self ) -> str:
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
# make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        outputs = model(**inputs, noise=noise)
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
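        # note: passing `noise` pins ViTMAE's otherwise random patch masking, which is
        # what makes the logits slice above reproducible across runs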
| 546 | 1 |
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
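# Quick sanity check (illustrative values, not from the original file):
assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0  # odd combined length -> middle element
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5  # even -> mean of the two middle elements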
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase :str = [float(x) for x in input('Enter the elements of first array: ').split()]
__lowerCamelCase :Optional[Any] = [float(x) for x in input('Enter the elements of second array: ').split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 222 |
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    # the decoder is a causal MBart with cross-attention to the Swin encoder
    # (boolean flags below are assumed to follow that standard setup)
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )
    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name
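# Illustrative trace of the renaming above (hypothetical checkpoint key):
#   "encoder.model.layers.0.blocks.0.attn.proj.weight"
#   -> "encoder.encoder.layers.0.blocks.0.attention.output.dense.weight"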
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            # destination keys follow the standard Swin q/k/v split layout (assumed)
            prefix = f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
# load original model
    original_model = DonutModel.from_pretrained(model_name).eval()
    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()
    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
# verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")
    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1])
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
lowerCamelCase : List[str] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
lowerCamelCase : Union[str, Any] = """When is the coffee break?"""
lowerCamelCase : int = task_prompt.replace("""{user_input}""" , UpperCamelCase__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
lowerCamelCase : Dict = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
lowerCamelCase : Tuple = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
lowerCamelCase : Optional[int] = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
lowerCamelCase : Optional[int] = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
lowerCamelCase : Tuple = """hello world"""
else:
raise ValueError("""Model name not supported""" )
    decoder_input_ids = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)
    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)
    # verify decoder hidden states
    original_logits = original_model(pixel_values, decoder_input_ids, None).logits
    logits = model(pixel_values, decoder_input_ids=decoder_input_ids).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
__lowerCamelCase :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
__lowerCamelCase :Tuple = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
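# Example invocation (script filename and paths are illustrative):
#   python convert_donut_to_pytorch.py --model_name naver-clova-ix/donut-base --pytorch_dump_folder_path ./donut-base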
| 222 | 1 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
_a : List[str] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
_a : List[Any] = 'main'
# Default branch name
_a : Dict = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
_a : Any = 'aaaaaaa'
# This commit does not exist, so we should 404.
_a : str = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
_a : Tuple = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def a_ ( ) -> str:
"""simple docstring"""
print('''Welcome!''' )
yield
print('''Bye!''' )
@contextlib.contextmanager
def a_ ( ) -> List[str]:
"""simple docstring"""
print('''Bonjour!''' )
yield
print('''Au revoir!''' )
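# What the tests below exercise: ContextManagers enters each manager in order and
# exits in reverse (ExitStack-style), so nesting context_fr() around context_en() prints
#   Bonjour! / Welcome! / <body> / Bye! / Au revoir!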
class a_ ( unittest.TestCase ):
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
assert transformers.__spec__ is not None
assert importlib.util.find_spec('''transformers''' ) is not None
class a_ ( unittest.TestCase ):
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
    def lowerCAmelCase(self, mock_stdout):
"""simple docstring"""
with ContextManagers([] ):
print('''Transformers are awesome!''' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
    def lowerCAmelCase(self, mock_stdout):
"""simple docstring"""
with ContextManagers([context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
    def lowerCAmelCase(self, mock_stdout):
"""simple docstring"""
with ContextManagers([context_fr(), context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' )
@require_torch
def lowerCAmelCase( self : Optional[int] ):
"""simple docstring"""
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])
@require_tf
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])
@require_flax
def lowerCAmelCase( self : int ):
"""simple docstring"""
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 720 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class a_ ( a , unittest.TestCase ):
A__ : Dict = ReformerTokenizer
A__ : Optional[int] = ReformerTokenizerFast
A__ : str = True
A__ : Tuple = False
A__ : str = True
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase( self : Any ):
"""simple docstring"""
snake_case : int = '''<s>'''
snake_case : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_000)
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
def lowerCAmelCase( self : Dict , UpperCAmelCase__ : List[Any]=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case : str = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
# Simple input
snake_case : Union[str, Any] = '''This is a simple input'''
snake_case : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case : int = ('''This is a simple input''', '''This is a pair''')
snake_case : int = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
def lowerCAmelCase( self : str ):
"""simple docstring"""
pass
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
snake_case : List[str] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [285, 46, 10, 170, 382] , )
snake_case : int = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
snake_case : int = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
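        # id 0 is <unk> in this test vocab, which is why the out-of-vocabulary pieces
        # ("9" and "é") come back as "<unk>" in the id -> token round-trip below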
snake_case : List[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
def lowerCAmelCase( self : List[str] ):
"""simple docstring"""
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
        sequence = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268,
            21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319,
            258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259,
            241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26,
            0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265,
        ]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(sequence))
@require_torch
@slow
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
snake_case : Any = list(self.big_tokenizer.get_vocab().keys() )[:10]
snake_case : Union[str, Any] = ''' '''.join(UpperCAmelCase__ )
snake_case : Optional[int] = self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='''pt''' )
snake_case : List[str] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
snake_case : Optional[Any] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
snake_case : Tuple = encoded_sequence['''input_ids'''].shape
snake_case : List[Any] = ReformerModel(UpperCAmelCase__ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase__ )
model(**UpperCAmelCase__ )
@slow
def lowerCAmelCase( self : Optional[int] ):
"""simple docstring"""
# fmt: off
snake_case : Tuple = {'''input_ids''': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
snake_case : Tuple = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=UpperCAmelCase__ , sequences=UpperCAmelCase__ , )
| 84 | 0 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
snake_case_ = logging.getLogger(__name__)
snake_case_ = """Hello world! cécé herlolip"""
snake_case_ = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    # flag values below are best-effort restorations of the bertabs fine-tuning setup (assumed)
    config = BertAbsConfig(
        temp_dir=".", finetune_bert=False, large=False, share_emb=True, use_bert_emb=False, encoder="bert",
        max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2,
        dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()
    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(A_ )) )
SCREAMING_SNAKE_CASE : str = torch.tensor(A_ ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : Dict = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(A_ )) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(A_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
SCREAMING_SNAKE_CASE : List[Any] = encoder_input_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_input_ids
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Dict = None
# The original model does not apply the geneator layer immediatly but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
    output_original_model = original(A_ , A_ , A_ , A_ , A_ , A_ , A_ )[0]
    output_original_generator = original.generator(A_ )
    output_converted_model = new_model(
        A_ , A_ , A_ , A_ , A_ )[0]
    output_converted_generator = new_model.generator(A_ )
    max_absolute_diff = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(max_absolute_diff))
    max_absolute_diff = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(max_absolute_diff))
    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
snake_case_ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 507 |
import torch
from torch import nn
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self: int , __lowerCAmelCase: List[Any] , __lowerCAmelCase: str , __lowerCAmelCase: int , __lowerCAmelCase: List[Any] , __lowerCAmelCase: Dict=1 , __lowerCAmelCase: Union[str, Any]=False ) -> int:
'''simple docstring'''
super().__init__()
__UpperCAmelCase = n_token
__UpperCAmelCase = d_embed
__UpperCAmelCase = d_proj
__UpperCAmelCase = cutoffs + [n_token]
__UpperCAmelCase = [0] + self.cutoffs
__UpperCAmelCase = div_val
__UpperCAmelCase = self.cutoffs[0]
__UpperCAmelCase = len(self.cutoffs ) - 1
__UpperCAmelCase = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
__UpperCAmelCase = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
__UpperCAmelCase = nn.Parameter(torch.zeros(self.n_clusters ) )
__UpperCAmelCase = nn.ModuleList()
__UpperCAmelCase = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCAmelCase , __lowerCAmelCase ) ) )
else:
self.out_projs.append(__lowerCAmelCase )
self.out_layers.append(nn.Linear(__lowerCAmelCase , __lowerCAmelCase ) )
else:
for i in range(len(self.cutoffs ) ):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__UpperCAmelCase = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(__lowerCAmelCase , __lowerCAmelCase ) ) )
self.out_layers.append(nn.Linear(__lowerCAmelCase , r_idx - l_idx ) )
__UpperCAmelCase = keep_order
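    # Rough intuition (illustrative numbers, not from this file): with n_token=30000 and
    # cutoffs=[2000, 10000], tokens 0..1999 get full-size "head" logits (plus one logit per
    # tail cluster), while rarer tokens are scored by smaller projections of size
    # d_embed // div_val**i -- the adaptive-softmax trade of compute for rare-token cost.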
def _UpperCAmelCase ( self: str , __lowerCAmelCase: int , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: Dict ) -> Dict:
'''simple docstring'''
if proj is None:
__UpperCAmelCase = nn.functional.linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
__UpperCAmelCase = nn.functional.linear(__lowerCAmelCase , proj.t().contiguous() )
__UpperCAmelCase = nn.functional.linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _UpperCAmelCase ( self: Optional[int] , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: Optional[Any]=None , __lowerCAmelCase: Union[str, Any]=False ) -> Tuple:
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
__UpperCAmelCase = hidden[..., :-1, :].contiguous()
__UpperCAmelCase = labels[..., 1:].contiguous()
__UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
__UpperCAmelCase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("Input and labels should have the same size in the batch dimension." )
else:
__UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
__UpperCAmelCase = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
__UpperCAmelCase = labels != -100
__UpperCAmelCase = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
__UpperCAmelCase = (
-nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
__UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
            weights, biases = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
__UpperCAmelCase = self.out_layers[i].weight
__UpperCAmelCase = self.out_layers[i].bias
if i == 0:
__UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
__UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
if labels is None:
__UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
__UpperCAmelCase = torch.zeros_like(__lowerCAmelCase , dtype=hidden.dtype , device=hidden.device )
__UpperCAmelCase = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
__UpperCAmelCase = (labels >= l_idx) & (labels < r_idx)
__UpperCAmelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
__UpperCAmelCase = labels.index_select(0 , __lowerCAmelCase ) - l_idx
__UpperCAmelCase = head_logprob.index_select(0 , __lowerCAmelCase )
__UpperCAmelCase = hidden.index_select(0 , __lowerCAmelCase )
else:
__UpperCAmelCase = hidden
if i == 0:
if labels is not None:
__UpperCAmelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
__UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
__UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
__UpperCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
__UpperCAmelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
__UpperCAmelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
__UpperCAmelCase = logprob_i
if labels is not None:
if (hasattr(self , "keep_order" ) and self.keep_order) or keep_order:
out.index_copy_(0 , __lowerCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _UpperCAmelCase ( self: int , __lowerCAmelCase: Optional[Any] ) -> List[Any]:
'''simple docstring'''
if self.n_clusters == 0:
__UpperCAmelCase = self._compute_logit(__lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
else:
# construct weights and biases
            weights, biases = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
__UpperCAmelCase = self.out_layers[i].weight
__UpperCAmelCase = self.out_layers[i].bias
if i == 0:
__UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
__UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__lowerCAmelCase )
biases.append(__lowerCAmelCase )
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
__UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
__UpperCAmelCase = self._compute_logit(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = nn.functional.log_softmax(__lowerCAmelCase , dim=1 )
__UpperCAmelCase = head_logprob[:, -i] + tail_logprob_i
__UpperCAmelCase = logprob_i
return out
| 221 | 0 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
lowerCAmelCase__ = ['text', 'image', 'audio']
def __UpperCAmelCase ( lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase__ : int = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input')
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO')) / '000000039769.png').resize((512, 512)))
elif input_type == "audio":
inputs.append(torch.ones(3_000))
elif isinstance(lowerCamelCase_ , lowerCamelCase_):
inputs.append(create_inputs(lowerCamelCase_))
else:
raise ValueError(f'Invalid type requested: {input_type}')
return inputs
def __UpperCAmelCase ( lowerCamelCase_) -> Dict:
UpperCamelCase__ : Tuple = []
for output in outputs:
if isinstance(lowerCamelCase_ , (str, AgentText)):
output_types.append('text')
elif isinstance(lowerCamelCase_ , (Image.Image, AgentImage)):
output_types.append('image')
elif isinstance(lowerCamelCase_ , (torch.Tensor, AgentAudio)):
output_types.append('audio')
else:
raise ValueError(f'Invalid output: {output}')
return output_types
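# e.g. output_types(["a caption", torch.ones(3_000)]) -> ["text", "audio"]   (illustrative)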
@is_tool_test
class __lowercase :
def __UpperCamelCase ( self : List[str]):
self.assertTrue(hasattr(self.tool , 'inputs'))
self.assertTrue(hasattr(self.tool , 'outputs'))
UpperCamelCase__ : int = self.tool.inputs
for _input in inputs:
if isinstance(_input , UpperCAmelCase_):
for __input in _input:
self.assertTrue(__input in authorized_types)
else:
self.assertTrue(_input in authorized_types)
UpperCamelCase__ : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types)
def __UpperCamelCase ( self : Tuple):
UpperCamelCase__ : List[str] = create_inputs(self.tool.inputs)
UpperCamelCase__ : Any = self.tool(*UpperCAmelCase_)
# There is a single output
if len(self.tool.outputs) == 1:
UpperCamelCase__ : Optional[Any] = [outputs]
self.assertListEqual(output_types(UpperCAmelCase_) , self.tool.outputs)
def __UpperCamelCase ( self : Optional[int]):
self.assertTrue(hasattr(self.tool , 'description'))
self.assertTrue(hasattr(self.tool , 'default_checkpoint'))
self.assertTrue(self.tool.description.startswith('This is a tool that'))
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Any = create_inputs(self.tool.inputs)
UpperCamelCase__ : Optional[Any] = self.tool(*UpperCAmelCase_)
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : int = [outputs]
self.assertEqual(len(UpperCAmelCase_) , len(self.tool.outputs))
for output, output_type in zip(UpperCAmelCase_ , self.tool.outputs):
UpperCamelCase__ : int = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(UpperCAmelCase_ , UpperCAmelCase_))
def __UpperCamelCase ( self : Union[str, Any]):
UpperCamelCase__ : List[str] = create_inputs(self.tool.inputs)
UpperCamelCase__ : Optional[int] = []
for _input, input_type in zip(UpperCAmelCase_ , self.tool.inputs):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
# Should not raise an error
UpperCamelCase__ : List[Any] = self.tool(*UpperCAmelCase_)
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_):
UpperCamelCase__ : int = [outputs]
self.assertEqual(len(UpperCAmelCase_) , len(self.tool.outputs))
| 6 |
'''simple docstring'''
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False):
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match(self, word: str):
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
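        # e.g. prefix="banana", word="bandana" -> ("ban", "ana", "dana")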
    def insert_many(self, words: list[str]):
        for word in words:
            self.insert(word)
    def insert(self, word: str):
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
assert not root.find('bandanas')
assert not root.find('apps')
root.delete('all')
assert not root.find('all')
root.delete('banana')
assert not root.find('banana')
assert root.find('bananas')
return True
def __UpperCAmelCase ( ) -> None:
assert test_trie()
def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
| 6 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
"""simple docstring"""
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if not scores:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
    # alternate between maximizing and minimizing players at each level
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
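# With the scores above the tree evaluates to 65:
#   depth-2 maxes: 90, 33, 65, 34423 -> depth-1 mins: 33, 65 -> root max: 65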
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 589 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Any = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
a__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 589 | 1 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
        help='''Set to a path or hub id of an already converted VAE to avoid converting it again.''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
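# Illustrative invocation (script name and paths are placeholders, not taken from this file):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./model.ckpt --dump_path ./converted-pipeline --half --to_safetensors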
| 715 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Tuple = '''xlm'''
__SCREAMING_SNAKE_CASE : int = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]=3_0_1_4_5 , SCREAMING_SNAKE_CASE__ : int=2_0_4_8 , SCREAMING_SNAKE_CASE__ : int=1_2 , SCREAMING_SNAKE_CASE__ : Any=1_6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : int=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Dict=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=2_0_4_8**-0.5 , SCREAMING_SNAKE_CASE__ : Dict=1e-12 , SCREAMING_SNAKE_CASE__ : Tuple=0.02 , SCREAMING_SNAKE_CASE__ : List[str]=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : int=3 , SCREAMING_SNAKE_CASE__ : Tuple=5 , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]="first" , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : str=5 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : Dict=0 , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
'''simple docstring'''
__a : Optional[Any] = vocab_size
__a : int = emb_dim
__a : Tuple = n_layers
__a : List[str] = n_heads
__a : Any = dropout
__a : Any = attention_dropout
__a : Any = gelu_activation
__a : Optional[int] = sinusoidal_embeddings
__a : Union[str, Any] = causal
__a : str = asm
__a : Optional[Any] = n_langs
__a : int = use_lang_emb
__a : List[str] = layer_norm_eps
__a : Optional[int] = bos_index
__a : Any = eos_index
__a : str = pad_index
__a : List[str] = unk_index
__a : List[Any] = mask_index
__a : Tuple = is_encoder
__a : str = max_position_embeddings
__a : Any = embed_init_std
__a : int = init_std
__a : Dict = summary_type
__a : List[Any] = summary_use_proj
__a : Dict = summary_activation
__a : Union[str, Any] = summary_proj_to_labels
__a : List[Any] = summary_first_dropout
__a : List[Any] = start_n_top
__a : Tuple = end_n_top
__a : int = mask_token_id
__a : str = lang_id
if "n_words" in kwargs:
__a : Optional[Any] = kwargs['n_words']
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class _UpperCamelCase( __lowerCamelCase ):
@property
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
if self.task == "multiple-choice":
__a : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__a : List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
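# Usage sketch, assuming the first anonymized class above corresponds to the upstream XLMConfig:
#   config = XLMConfig(emb_dim=1024, n_layers=6, n_heads=8)
#   config.hidden_size  # -> 1024, resolved through the attribute_map aliases defined above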
| 577 | 0 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
__UpperCAmelCase = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    __UpperCAmelCase = requests.get(url, headers={'User-Agent': UserAgent().random})
# res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # saved locally so the result CSS class names (e.g. .eZt8xd) can be inspected
for data in res.iter_content(10000):
out_file.write(data)
__UpperCAmelCase = BeautifulSoup(res.text, 'html.parser')
__UpperCAmelCase = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F'https://google.com{link.get("href")}')
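# Usage (illustrative): python google_search.py "open source licenses"
# Opens the top five result links in the default browser; a "Maps" result opens its direct href.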
| 406 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase=1_3 , _UpperCamelCase=3_2 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=[1_0, 2_0, 3_0, 4_0] , _UpperCamelCase=[2, 2, 3, 2] , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=3_7 , _UpperCamelCase="gelu" , _UpperCamelCase=1_0 , _UpperCamelCase=0.02 , _UpperCamelCase=["stage2", "stage3", "stage4"] , _UpperCamelCase=[2, 3, 4] , _UpperCamelCase=None , ) -> str:
UpperCAmelCase_ : Union[str, Any] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : List[Any] = image_size
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : List[Any] = num_stages
UpperCAmelCase_ : str = hidden_sizes
UpperCAmelCase_ : Any = depths
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : List[str] = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Union[str, Any] = num_labels
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : List[str] = out_features
UpperCAmelCase_ : Optional[int] = out_indices
UpperCAmelCase_ : List[Any] = scope
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = None
if self.use_labels:
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ) -> List[str]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]:
UpperCAmelCase_ : Any = ConvNextModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(_UpperCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
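        # ConvNext downsamples by 4 in the stem and by 2 in each of the three later stages,
        # so the spatial resolution shrinks by a total factor of 4 * 2**3 = 32.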
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : List[str] = ConvNextForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Any = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : List[str] = ConvNextBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : int = model(_UpperCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Tuple = ConvNextBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Tuple = model(_UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = config_and_inputs
UpperCAmelCase_ : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : str = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_snake_case : int = (
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
_snake_case : Any = True
_snake_case : Optional[int] = False
_snake_case : Optional[int] = False
_snake_case : Union[str, Any] = False
_snake_case : List[str] = False
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : Dict = ConvNextModelTester(self )
UpperCAmelCase_ : Tuple = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=3_7 )
def __UpperCAmelCase ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self ) -> List[str]:
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def __UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def __UpperCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def __UpperCAmelCase ( self ) -> Optional[Any]:
pass
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Tuple = model_class(_UpperCamelCase )
UpperCAmelCase_ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : str = [*signature.parameters.keys()]
UpperCAmelCase_ : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[int]:
def check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : str = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
UpperCAmelCase_ : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ : Dict = self.model_tester.num_stages
self.assertEqual(len(_UpperCamelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
            # check that output_hidden_states also works when enabled via the config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : int = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
@slow
def __UpperCAmelCase ( self ) -> int:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Any = ConvNextModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ) -> int:
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.default_image_processor
UpperCAmelCase_ : Union[str, Any] = prepare_img()
UpperCAmelCase_ : Tuple = image_processor(images=_UpperCamelCase , return_tensors='pt' ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Any = model(**_UpperCamelCase )
# verify the logits
UpperCAmelCase_ : int = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
UpperCAmelCase_ : int = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4 ) )
@require_torch
class lowerCamelCase (unittest.TestCase , _snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = (ConvNextBackbone,) if is_torch_available() else ()
_snake_case : Union[str, Any] = ConvNextConfig
_snake_case : Tuple = False
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = ConvNextModelTester(self )
| 406 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : Optional[int] = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[Any] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[Any] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
lowerCamelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list:
snake_case : str = len(lowercase )
snake_case : Tuple = []
for i in range(len(lowercase ) - pat_len + 1 ):
snake_case : str = True
for j in range(lowercase ):
if s[i + j] != pattern[j]:
snake_case : Dict = False
break
if match_found:
position.append(lowercase )
return position
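# Worst-case running time is O(len(s) * len(pattern)); extra space is O(1) beyond
# the list of match positions.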
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 684 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _a (lowerCAmelCase__ , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileBertTokenizer
SCREAMING_SNAKE_CASE = MobileBertTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = filter_non_english
SCREAMING_SNAKE_CASE = 'google/mobilebert-uncased'
def UpperCamelCase ( self ) -> Tuple:
super().setUp()
_SCREAMING_SNAKE_CASE = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCamelCase ( self , A__ ) -> int:
_SCREAMING_SNAKE_CASE = '''UNwant\u00E9d,running'''
_SCREAMING_SNAKE_CASE = '''unwanted, running'''
return input_text, output_text
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_A , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] )
def UpperCamelCase ( self ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = '''UNwant\u00E9d,running'''
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(_A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_SCREAMING_SNAKE_CASE = tokenizer.encode(_A , add_special_tokens=_A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = tokenizer.encode(_A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
# With lower casing
_SCREAMING_SNAKE_CASE = self.get_tokenizer(do_lower_case=_A )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(do_lower_case=_A )
_SCREAMING_SNAKE_CASE = '''UNwant\u00E9d,running'''
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(_A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_SCREAMING_SNAKE_CASE = tokenizer.encode(_A , add_special_tokens=_A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = tokenizer.encode(_A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=_A , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def UpperCamelCase ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
_SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(_A ):
_SCREAMING_SNAKE_CASE = i
_SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=_A , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def UpperCamelCase ( self ) -> Union[str, Any]:
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def UpperCamelCase ( self ) -> Optional[Any]:
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def UpperCamelCase ( self ) -> Tuple:
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" , add_special_tokens=_A )
_SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_A )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(_A )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def UpperCamelCase ( self ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(_A , **_A )
_SCREAMING_SNAKE_CASE = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
_SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(
_A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , )
_SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(_A , """do_lower_case""" ) else False
_SCREAMING_SNAKE_CASE = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = ['''的''', '''人''', '''有''']
_SCREAMING_SNAKE_CASE = ''''''.join(_A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(_A , **_A )
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(_A , **_A )
_SCREAMING_SNAKE_CASE = tokenizer_p.encode(_A , add_special_tokens=_A )
_SCREAMING_SNAKE_CASE = tokenizer_r.encode(_A , add_special_tokens=_A )
_SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(_A )
_SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(_A , **_A )
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(_A , **_A )
_SCREAMING_SNAKE_CASE = tokenizer_r.encode(_A , add_special_tokens=_A )
_SCREAMING_SNAKE_CASE = tokenizer_p.encode(_A , add_special_tokens=_A )
_SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(_A )
_SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that only the first Chinese character is not preceded by "##".
_SCREAMING_SNAKE_CASE = [
F"##{token}" if idx != 0 else token for idx, token in enumerate(_A )
]
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
| 591 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = XCLIPTextConfig()
# derive patch size from model name
__SCREAMING_SNAKE_CASE : Tuple = model_name.find('''patch''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
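    # Parsing exactly two digits after "patch" assumes a patch size of 14, 16 or 32,
    # which holds for every released X-CLIP checkpoint handled below.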
__SCREAMING_SNAKE_CASE : Tuple = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case )
if "large" in model_name:
__SCREAMING_SNAKE_CASE : Optional[Any] = 768
__SCREAMING_SNAKE_CASE : Optional[int] = 3_072
__SCREAMING_SNAKE_CASE : Optional[Any] = 12
__SCREAMING_SNAKE_CASE : Optional[Any] = 1_024
__SCREAMING_SNAKE_CASE : int = 4_096
__SCREAMING_SNAKE_CASE : Tuple = 16
__SCREAMING_SNAKE_CASE : Optional[int] = 24
__SCREAMING_SNAKE_CASE : Optional[int] = 768
__SCREAMING_SNAKE_CASE : Optional[int] = 3_072
if model_name == "xclip-large-patch14-16-frames":
__SCREAMING_SNAKE_CASE : Any = 336
__SCREAMING_SNAKE_CASE : Any = XCLIPConfig.from_text_vision_configs(snake_case , snake_case )
if "large" in model_name:
__SCREAMING_SNAKE_CASE : Any = 768
return config
def a__ ( snake_case ):
"""simple docstring"""
# text encoder
if name == "token_embedding.weight":
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
__SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
__SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' )
if "ln_final" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
__SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
__SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
__SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' )
if "visual.proj" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' )
if "text_projection" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
__SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
__SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' )
if name.startswith('''mit.resblocks''' ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
__SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' )
return name
def a__ ( snake_case , snake_case ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE : Tuple = orig_state_dict.pop(snake_case )
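        # The original checkpoint stores query/key/value as one fused in_proj tensor;
        # the branches below split it into equal thirds for separate q/k/v projections.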
if "attn.in_proj" in key:
__SCREAMING_SNAKE_CASE : Optional[Any] = key.split('''.''' )
if key.startswith('''visual''' ):
__SCREAMING_SNAKE_CASE : List[Any] = key_split[3]
__SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__SCREAMING_SNAKE_CASE : Union[str, Any] = val[
:dim, :
]
__SCREAMING_SNAKE_CASE : str = val[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : Tuple = val[
-dim:, :
]
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = val[
:dim
]
__SCREAMING_SNAKE_CASE : Tuple = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : Tuple = val[
-dim:
]
else:
if "weight" in key:
__SCREAMING_SNAKE_CASE : Tuple = val[
:dim, :
]
__SCREAMING_SNAKE_CASE : str = val[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : str = val[
-dim:, :
]
else:
__SCREAMING_SNAKE_CASE : Dict = val[:dim]
__SCREAMING_SNAKE_CASE : str = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : Tuple = val[-dim:]
elif key.startswith('''mit''' ):
__SCREAMING_SNAKE_CASE : List[str] = key_split[2]
__SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.mit_hidden_size
if "weight" in key:
__SCREAMING_SNAKE_CASE : str = val[:dim, :]
__SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : Any = val[:dim]
__SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2]
__SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:]
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2]
__SCREAMING_SNAKE_CASE : Any = config.text_config.hidden_size
if "weight" in key:
__SCREAMING_SNAKE_CASE : Tuple = val[:dim, :]
__SCREAMING_SNAKE_CASE : int = val[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : Dict = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : Tuple = val[:dim]
__SCREAMING_SNAKE_CASE : str = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : int = val[-dim:]
else:
__SCREAMING_SNAKE_CASE : int = rename_key(snake_case )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__SCREAMING_SNAKE_CASE : int = val.T
__SCREAMING_SNAKE_CASE : Union[str, Any] = val
return orig_state_dict
def a__ ( snake_case ):
"""simple docstring"""
if num_frames == 8:
__SCREAMING_SNAKE_CASE : List[Any] = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 16:
__SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti.npy'''
elif num_frames == 32:
__SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy'''
__SCREAMING_SNAKE_CASE : List[str] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename=snake_case , repo_type='''dataset''' , )
__SCREAMING_SNAKE_CASE : int = np.load(snake_case )
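    # np.load yields a single array of frames; the processor expects a list of per-frame arrays.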
return list(snake_case )
def a__ ( snake_case , snake_case=None , snake_case=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name]
__SCREAMING_SNAKE_CASE : Any = 8
if "16-frames" in model_name:
__SCREAMING_SNAKE_CASE : Optional[int] = 16
elif "shot" in model_name:
__SCREAMING_SNAKE_CASE : Optional[Any] = 32
__SCREAMING_SNAKE_CASE : List[str] = get_xclip_config(snake_case , snake_case )
__SCREAMING_SNAKE_CASE : Tuple = XCLIPModel(snake_case )
model.eval()
if "drive" in checkpoint_url:
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''pytorch_model.bin'''
gdown.cached_download(snake_case , snake_case , quiet=snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case , map_location='''cpu''' )['''model''']
else:
__SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(snake_case )['''model''']
__SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(snake_case , snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPModel(snake_case )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(snake_case , strict=snake_case )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__SCREAMING_SNAKE_CASE : Any = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224
__SCREAMING_SNAKE_CASE : str = VideoMAEImageProcessor(size=snake_case )
__SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
__SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
__SCREAMING_SNAKE_CASE : List[Any] = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case )
__SCREAMING_SNAKE_CASE : Dict = prepare_video(snake_case )
__SCREAMING_SNAKE_CASE : List[str] = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=snake_case , return_tensors='''pt''' , padding=snake_case )
print('''Shape of pixel values:''' , inputs.pixel_values.shape )
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case )
# Verify outputs
__SCREAMING_SNAKE_CASE : Dict = outputs.logits_per_video
__SCREAMING_SNAKE_CASE : Tuple = logits_per_video.softmax(dim=1 )
print('''Probs:''' , snake_case )
# kinetics-400
if model_name == "xclip-base-patch32":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(snake_case , organization='''nielsr''' )
processor.push_to_hub(snake_case , organization='''nielsr''' )
slow_tokenizer.push_to_hub(snake_case , organization='''nielsr''' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowercase_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
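# Illustrative invocation (output path is a placeholder):
#   python convert_x_clip_original_pytorch_to_hf.py --model_name xclip-base-patch32 --pytorch_dump_folder_path ./xclip-base-patch32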
| 74 | 0 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase ( unittest.TestCase ):
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = 0
@slow
def __a ( self ) -> Optional[Any]:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_UpperCamelCase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
            self.assertIsInstance(_UpperCamelCase , (GPT2Tokenizer, GPT2TokenizerFast) )
self.assertGreater(len(_UpperCamelCase ) , 0 )
def __a ( self ) -> Dict:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def __a ( self ) -> Any:
lowerCAmelCase_ = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
# Check that tokenizer_type ≠ model_type
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_UpperCamelCase , config=_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def __a ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(_UpperCamelCase , "vocab.txt" ) )
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_UpperCamelCase , tokenizer_type="bert" , use_fast=_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(_UpperCamelCase , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(_UpperCamelCase , "merges.txt" ) )
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_UpperCamelCase , tokenizer_type="gpt2" , use_fast=_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@require_tokenizers
def __a ( self ) -> str:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(_UpperCamelCase , "vocab.txt" ) )
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_UpperCamelCase , tokenizer_type="bert" )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(_UpperCamelCase , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(_UpperCamelCase , "merges.txt" ) )
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_UpperCamelCase , tokenizer_type="gpt2" )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def __a ( self ) -> Optional[int]:
with pytest.raises(_UpperCamelCase ):
AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx" )
@require_tokenizers
def __a ( self ) -> List[str]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowerCAmelCase_ = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" )
self.assertIsInstance(_UpperCamelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _UpperCamelCase )
else:
self.assertEqual(tokenizer.do_lower_case , _UpperCamelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def __a ( self ) -> List[str]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_UpperCamelCase , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ):
lowerCAmelCase_ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" )
def __a ( self ) -> Any:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
lowerCAmelCase_ = TOKENIZER_MAPPING.values()
lowerCAmelCase_ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_UpperCamelCase )
@require_tokenizers
def __a ( self ) -> List[Any]:
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=_UpperCamelCase ) , _UpperCamelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , _UpperCamelCase )
    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30_000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_tokenizer_identifier_with_correct_config(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_slow_only_model(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")
    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new tokenizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")

    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
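
# A minimal sketch (an editorial addition, not part of the original test suite) of
# the registration flow the tests above exercise. `CustomConfig` and `CustomTokenizer`
# are the helper classes this module defines for testing.
if __name__ == "__main__":
    AutoConfig.register("custom", CustomConfig)
    AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
    # Any checkpoint whose config resolves to CustomConfig will now load CustomTokenizer.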
from ...processing_utils import ProcessorMixin


class SpeechT5Processor(ProcessorMixin):
    """Wraps a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # Temporarily switch the feature extractor to spectrogram mode so the
                # log-mel targets are padded with the right feature size.
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
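
# A minimal usage sketch (editorial addition, not part of the module). It assumes
# the public "microsoft/speecht5_tts" checkpoint ships this processor:
#
#     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#     inputs = processor(text="Hello, world", return_tensors="pt")
#     # For TTS training, also pass `audio_target=...` so the batch gains
#     # `labels` (log-mel targets) and `decoder_attention_mask`.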
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    load_numpy,
    nightly,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # NOTE: the name of this boolean flag was lost to extraction; `test_cpu_offload`
    # is an assumption based on similar pipeline test suites.
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float16, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3


@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float16, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
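
# A minimal end-to-end sketch (editorial addition) of the pipeline these tests
# exercise, assuming a CUDA device and the public checkpoint used above:
#
#     from diffusers import LDMTextToImagePipeline
#
#     pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to("cuda")
#     image = pipe("A painting of a squirrel eating a burger", num_inference_steps=50).images[0]
#     image.save("squirrel.png")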
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
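
# Editorial note: with the hooks above, per-category report files can be produced
# with e.g. `python -m pytest --make-reports=tests tests/`. The value passed to
# `--make-reports` is a free-form id; `pytest_terminal_summary_main` writes the
# report files under `reports/<id>/`.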
import json
import os
from pathlib import Path

import pytest

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename


URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
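
# A minimal usage sketch (editorial addition) of the API under test. The URL is a
# placeholder; `download` caches to the configured directory and `iter_archive`
# streams (path, file-object) pairs out of a tar/zip without fully extracting it.
#
#     dl_manager = DownloadManager(dataset_name="demo")
#     local_path = dl_manager.download("https://example.com/data.tar.gz")
#     for inner_path, file_obj in dl_manager.iter_archive(local_path):
#         first_line = file_obj.readline()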
import unittest

import numpy as np

from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
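
# A minimal usage sketch (editorial addition) of the processor under test, assuming
# the public "Intel/dpt-large" checkpoint:
#
#     from PIL import Image
#     from transformers import DPTImageProcessor
#
#     image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
#     pixel_values = image_processor(Image.open("photo.jpg"), return_tensors="pt").pixel_values
#     # -> a (1, 3, height, width) float tensor, resized and normalized as configured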
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


# The class name was lost to extraction; the CLIP image processor / XLM-R tokenizer
# pairing matches the AltCLIP model family, so `AltCLIPProcessor` is assumed here.
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
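
# A minimal usage sketch (editorial addition). "BAAI/AltCLIP" is assumed to be a
# checkpoint compatible with this tokenizer/image-processor pairing:
#
#     from PIL import Image
#
#     processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#     batch = processor(text=["a photo of a cat"], images=Image.open("cat.jpg"), return_tensors="pt")
#     # batch holds `input_ids`, `attention_mask` and `pixel_values`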
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant multipliers parsed from rules like "1:10,0.1:20,0.01:30,0.005"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to zero."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine schedule with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the initial lr down to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified entry point that dispatches to the scheduler named by `name`."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
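
# A short usage sketch (editorial addition): linear warmup for 100 steps, then a
# linear decay to zero over the remaining 900 steps. `model` is assumed to exist.
#
#     import torch
#
#     optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
#     lr_scheduler = get_scheduler(
#         "linear", optimizer=optimizer, num_warmup_steps=100, num_training_steps=1000
#     )
#     # call `lr_scheduler.step()` once after each `optimizer.step()`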
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = """▁"""
__magic_name__ = {"""vocab_file""": """spiece.model"""}
__magic_name__ = {
"""vocab_file""": {
"""google/reformer-crime-and-punishment""": (
"""https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"""
)
}
}
__magic_name__ = {
"""google/reformer-crime-and-punishment""": 52_42_88,
}
class _snake_case ( _SCREAMING_SNAKE_CASE ):
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = ["input_ids", "attention_mask"]
def __init__( self : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int="</s>" , SCREAMING_SNAKE_CASE_ : Optional[Any]="<unk>" , SCREAMING_SNAKE_CASE_ : Dict=[] , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ):
lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE_ , )
lowerCamelCase__ = vocab_file
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE_ )
@property
def __UpperCAmelCase ( self : List[str] ):
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Tuple ):
lowerCamelCase__ = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ):
lowerCamelCase__ = self.__dict__.copy()
lowerCamelCase__ = None
return state
def __setstate__( self : str , SCREAMING_SNAKE_CASE_ : str ):
lowerCamelCase__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCamelCase__ = {}
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ):
return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[str] ):
return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int ):
if index < self.sp_model.get_piece_size():
lowerCamelCase__ = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE_ )
return token
def __UpperCAmelCase ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCamelCase__ = []
lowerCamelCase__ = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ ) + token
lowerCamelCase__ = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE_ )
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ )
return out_string.strip()
def __UpperCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase__ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_ , """wb""" ) as fi:
lowerCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
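
# A minimal usage sketch (editorial addition), assuming the public
# "google/reformer-crime-and-punishment" checkpoint referenced above:
#
#     tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     ids = tokenizer("A few words of text.").input_ids
#     print(tokenizer.decode(ids))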
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
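
# Editorial note: `_LazyModule` replaces this module in `sys.modules` with a proxy
# that only imports a submodule when one of its attributes is first accessed. As a
# result `import transformers` stays cheap, and e.g.
# `from transformers.models.bloom import BloomConfig` triggers the import of
# `configuration_bloom` only at that point.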
def count_inversions_bf(arr):
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)

    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1

    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions with merge-sort-style divide and conquer in O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    sorted_p, inversions_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    merged, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)

    num_inversions = inversions_p + inversions_q + cross_inversions
    return merged, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting pairs (x in p, y in q) with x > y."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
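
# A quick illustrative check (editorial addition). The divide-and-conquer counter
# also returns the sorted array, so both outputs can be verified together:
#
#     >>> count_inversions_recursive([3, 1, 2])
#     ([1, 2, 3], 2)
#
# Brute force runs in O(n^2); the merge-sort variant runs in O(n log n).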
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = os.path.abspath(_snake_case )
logger.info(F'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
UpperCAmelCase = tf.train.list_variables(_snake_case )
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
UpperCAmelCase = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(F'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
UpperCAmelCase = name[1:]
# figure out how many levels deep the name is
UpperCAmelCase = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(_snake_case )
# read data
UpperCAmelCase = tf.train.load_variable(_snake_case , _snake_case )
names.append("""/""".join(_snake_case ) )
arrays.append(_snake_case )
logger.info(F'''Read a total of {len(_snake_case ):,} layers''' )
# Sanity check
if len(set(_snake_case ) ) != 1:
raise ValueError(F'''Found layer names with different depths (layer depth {list(set(_snake_case ) )})''' )
UpperCAmelCase = list(set(_snake_case ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(_snake_case , _snake_case ):
UpperCAmelCase = full_name.split("""/""" )
UpperCAmelCase = model
UpperCAmelCase = []
for i, m_name in enumerate(_snake_case ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
UpperCAmelCase = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
UpperCAmelCase = getattr(_snake_case , """embeddings""" )
UpperCAmelCase = getattr(_snake_case , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
UpperCAmelCase = getattr(_snake_case , """encoder""" )
UpperCAmelCase = getattr(_snake_case , """layer""" )
UpperCAmelCase = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
UpperCAmelCase = getattr(_snake_case , """pooler""" )
UpperCAmelCase = getattr(_snake_case , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
UpperCAmelCase = getattr(_snake_case , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
UpperCAmelCase = getattr(_snake_case , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
UpperCAmelCase = getattr(_snake_case , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
UpperCAmelCase = getattr(_snake_case , """token_type_embeddings""" )
else:
raise ValueError(F'''Unknown embedding layer with name {full_name}''' )
trace.append("""weight""" )
UpperCAmelCase = getattr(_snake_case , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
UpperCAmelCase = getattr(_snake_case , """attention""" )
UpperCAmelCase = getattr(_snake_case , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
UpperCAmelCase = getattr(_snake_case , """attention""" )
UpperCAmelCase = getattr(_snake_case , """output""" )
UpperCAmelCase = getattr(_snake_case , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
UpperCAmelCase = getattr(_snake_case , """attention""" )
UpperCAmelCase = getattr(_snake_case , """output""" )
UpperCAmelCase = getattr(_snake_case , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
UpperCAmelCase = getattr(_snake_case , """output""" )
UpperCAmelCase = getattr(_snake_case , """dense""" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""] )
UpperCAmelCase = getattr(_snake_case , """output""" )
UpperCAmelCase = getattr(_snake_case , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
UpperCAmelCase = getattr(_snake_case , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
UpperCAmelCase = getattr(_snake_case , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
UpperCAmelCase = getattr(_snake_case , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
UpperCAmelCase = getattr(_snake_case , """intermediate""" )
UpperCAmelCase = getattr(_snake_case , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
UpperCAmelCase = getattr(_snake_case , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
UpperCAmelCase = getattr(_snake_case , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
UpperCAmelCase = getattr(_snake_case , """weight""" )
else:
logger.warning(F'''Ignored {m_name}''' )
# for certain layers reshape is necessary
UpperCAmelCase = """.""".join(_snake_case )
if re.match(R"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , _snake_case ) or re.match(
R"""(\S+)\.attention\.output\.dense\.weight""" , _snake_case ):
UpperCAmelCase = array.reshape(pointer.data.shape )
if "kernel" in full_name:
UpperCAmelCase = array.transpose()
if pointer.shape == array.shape:
UpperCAmelCase = torch.from_numpy(_snake_case )
else:
raise ValueError(
F'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
F''' {array.shape}''' )
logger.info(F'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
logger.info(F'''Loading model based on config from {config_path}...''' )
UpperCAmelCase = BertConfig.from_json_file(_snake_case )
UpperCAmelCase = BertModel(_snake_case )
# Load weights from checkpoint
logger.info(F'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(_snake_case , _snake_case , _snake_case )
# Save pytorch-model
logger.info(F'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict() , _snake_case )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
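# Example (sketch): the name-tracing idea the converter above relies on, in
# isolation -- split a TF-style variable name into fragments, map each fragment
# to a PyTorch attribute path, and walk the module with getattr. NAME_MAP and
# the tiny model below are illustrative assumptions, not the script's tables.
import torch.nn as nn

NAME_MAP = {"_output_dense": ["output", "dense"], "kernel": ["weight"], "bias": ["bias"]}

def resolve(module, tf_name):
    pointer = module
    for part in tf_name.split("/"):
        for attr in NAME_MAP.get(part, []):  # unknown fragments are skipped
            pointer = getattr(pointer, attr)
    return pointer

class _Output(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(4, 4)

class _Layer(nn.Module):
    def __init__(self):
        super().__init__()
        self.output = _Output()

param = resolve(_Layer(), "layer/_output_dense/kernel")
print(param.shape)  # torch.Size([4, 4]); real TF kernels would also be transposed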
| 341 | 0 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Training arguments extended with seq2seq-specific options; field names follow the help strings."""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={'help': 'The label smoothing epsilon to apply (if not zero).'})
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to SortishSamler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
    adafactor: bool = field(default=False, metadata={'help': 'whether to use adafactor'})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'})
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'})
    dropout: Optional[float] = field(default=None, metadata={'help': 'Dropout probability. Goes into model.config.'})
    attention_dropout: Optional[float] = field(
        default=None, metadata={'help': 'Attention dropout probability. Goes into model.config.'})
    lr_scheduler: Optional[str] = field(
        default='linear', metadata={'help': f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"})
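# Example (sketch): how such a dataclass is typically consumed --
# HfArgumentParser turns every field above into a CLI flag. The argv below is
# illustrative only.
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser(Seq2SeqTrainingArguments)
    (training_args,) = parser.parse_args_into_dataclasses(
        args=["--output_dir", "out", "--sortish_sampler", "--label_smoothing", "0.1"]
    )
    print(training_args.label_smoothing)  # 0.1
    print(training_args.sortish_sampler)  # True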
| 721 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = 'dandelin/vilt-b32-finetuned-vqa'
    description = (
        'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
        'image containing the information, as well as a `question` which should be the question in English. It '
        'returns a text that is the answer to the question.'
    )
    name = 'image_qa'
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ['image', 'text']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)
    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
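# Example (sketch): calling the tool end-to-end. The image path is hypothetical;
# the first call downloads the ViLT checkpoint named in `default_checkpoint`.
if __name__ == "__main__":
    from PIL import Image

    tool = ImageQuestionAnsweringTool()
    image = Image.open("cats.png")  # any RGB image
    print(tool(image=image, question="How many cats are in the picture?"))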
| 313 | 0 |
'''simple docstring'''
# Imports
import numpy as np
class IndexCalculation:
    """
    Calculates vegetation indices (NDVI, EVI, the SAVI family, ...) from the
    spectral band matrices set via `set_matricies`.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            'ARVI2': self.arv12,
            'CCCI': self.ccci,
            'CVI': self.cvi,
            'GLI': self.gli,
            'NDVI': self.ndvi,
            'BNDVI': self.bndvi,
            'redEdgeNDVI': self.red_edge_ndvi,
            'GNDVI': self.gndvi,
            'GBNDVI': self.gbndvi,
            'GRNDVI': self.grndvi,
            'RBNDVI': self.rbndvi,
            'PNDVI': self.pndvi,
            'ATSAVI': self.atsavi,
            'BWDRVI': self.bwdrvi,
            'CIgreen': self.ci_green,
            'CIrededge': self.ci_rededge,
            'CI': self.ci,
            'CTVI': self.ctvi,
            'GDVI': self.gdvi,
            'EVI': self.evi,
            'GEMI': self.gemi,
            'GOSAVI': self.gosavi,
            'GSAVI': self.gsavi,
            'Hue': self.hue,
            'IVI': self.ivi,
            'IPVI': self.ipvi,
            'I': self.i,
            'RVI': self.rvi,
            'MRVI': self.mrvi,
            'MSAVI': self.m_savi,
            'NormG': self.norm_g,
            'NormNIR': self.norm_nir,
            'NormR': self.norm_r,
            'NGRDI': self.ngrdi,
            'RI': self.ri,
            'S': self.s,
            'IF': self._if,
            'DVI': self.dvi,
            'TVI': self.tvi,
            'NDRE': self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print('Index not in the list!')
            return False
    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, a=0.08, b=1.22, x=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
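# Example (sketch): computing one index on synthetic reflectance bands; random
# arrays stand in for real imagery.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    red, nir = rng.random((4, 4)), rng.random((4, 4))
    index_calc = IndexCalculation(red=red, nir=nir)
    print(index_calc.calculation("NDVI"))  # element-wise (nir - red) / (nir + red)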
| 366 |
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved Pseudo Linear Multistep (iPNDM) scheduler."""

    order = 1
@register_to_config
    def __init__(self, num_train_timesteps: int = 1_000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []
    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        # linear multistep (Adams-Bashforth) combination of past estimates
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample
    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
    def __len__(self):
        return self.config.num_train_timesteps
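# Example (sketch): the `ets` buffer above applies classic Adams-Bashforth
# multistep weights; the same coefficient pattern on a generic history list.
def _multistep_combine(ets):
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24

assert _multistep_combine([1.0, 1.0, 1.0, 1.0]) == 1.0  # the weights sum to 24/24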
| 366 | 1 |
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1
def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(F'''\u001b[{color}m{content}\u001b[0m''', end)


def reset_cursor():
    forceWrite('\r')


def move_cursor(num_lines, direction):
    forceWrite(F'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''')


def clear_line():
    forceWrite(' ' * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite('-' * TERMINAL_WIDTH)
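# Example (sketch): overwriting a finished status line with the helpers above
# (assumes an ANSI-capable terminal).
if __name__ == "__main__":
    forceWrite("working...", end="\n")
    move_cursor(1, "up")    # emits ESC[1A
    clear_line()
    writeColor("done", 32)  # 32 = ANSI green
    forceWrite("\n")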
| 710 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config):
'''simple docstring'''
config.addinivalue_line(
'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("""IGNORE_RESULT""")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
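# Example (sketch): with the custom checker installed, a doctest can opt out of
# output comparison for machine-specific results via the IGNORE_RESULT flag.
def _cache_info():
    """
    >>> _cache_info()  # doctest: +IGNORE_RESULT
    {'path': '/tmp/machine-specific'}
    """
    return {"path": "/tmp/anything"}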
| 648 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For each query vector, find the nearest dataset vector and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
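# Example (sketch): both helpers on toy data.
if __name__ == "__main__":
    dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    queries = np.array([[0.9, 1.1]])
    print(similarity_search(dataset, queries))  # nearest vector [1.0, 1.0] plus its distance
    print(cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 1.0])))  # ~0.7071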
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 44 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Möbius function: 0 if not square-free, else (-1)**k for k distinct prime factors."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
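# Example (sketch): spot checks, assuming the local `maths` package is importable.
if __name__ == "__main__":
    print(mobius(24))  # 0: 24 = 2**3 * 3 is not square-free
    print(mobius(15))  # 1: two distinct prime factors (even count)
    print(mobius(30))  # -1: three distinct prime factors (odd count)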
| 696 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = """gelu"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict['''input_ids''']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1E-3)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ) -> dict:
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            """conversational""": TFBlenderbotForConditionalGeneration,
            """feature-extraction""": TFBlenderbotModel,
            """summarization""": TFBlenderbotForConditionalGeneration,
            """text2text-generation""": TFBlenderbotForConditionalGeneration,
            """translation""": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["""My friends are cool but they eat too many carbs."""]
    model_name = """facebook/blenderbot-400M-distill"""

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors='''tf''')
        generated_ids = self.model.generate(
            model_inputs.input_ids, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
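# Example (sketch): the cache-consistency assertion above, framework-free -- a
# cumulative sum stands in for a causal decoder with `past_key_values`.
import numpy as np

def _full(x):
    return np.cumsum(x)

def _incremental(x_new, cache):
    return cache + np.cumsum(x_new)  # `cache` plays the role of past_key_values

_x, _x_new = np.array([1.0, 2.0]), np.array([3.0, 4.0])
np.testing.assert_allclose(
    _full(np.concatenate([_x, _x_new]))[-2:], _incremental(_x_new, _x.sum()), rtol=1e-3
)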
| 711 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """Video classification pipeline using any model that maps video frames to class logits."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, '''decord''')
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)
    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith('''http://''') or video.startswith('''https://'''):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 441 | 0 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            """help""": (
                """The model checkpoint for weights initialization. Leave None if you want to train a model from"""
                """ scratch."""
            )
        }, )
    model_type: Optional[str] = field(
        default=None, metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(MODEL_TYPES)}, )
    config_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""})
    cache_dir: Optional[str] = field(
        default=None, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"""help""": """The input training data file (a text file)."""})
    train_data_files: Optional[str] = field(
        default=None, metadata={
            """help""": (
                """The input training data files (multiple files in glob format). """
                """Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
            )
        }, )
    eval_data_file: Optional[str] = field(
        default=None, metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""}, )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""}, )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""}, )
    line_by_line: bool = field(
        default=False, metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""}, )
    mlm: bool = field(
        default=False, metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""})
    whole_word_mask: bool = field(default=False, metadata={"""help""": """Whether ot not to use whole word mask."""})
    mlm_probability: float = field(
        default=0.15, metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""})
    plm_probability: float = field(
        default=1 / 6, metadata={
            """help""": (
                """Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
                """ modeling."""
            )
        }, )
    max_span_length: int = field(
        default=5, metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""})
    block_size: int = field(
        default=-1, metadata={
            """help""": (
                """Optional input sequence length after tokenization."""
                """The training dataset will be truncated in block of this size for training."""
                """Default to the model max input length for single sentence inputs (take into account special tokens)."""
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"""help""": """Overwrite the cached training and evaluation sets"""})
def get_dataset(args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate: bool = False, cache_dir: Optional[str] = None, ):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask')
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path, )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir, )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py, or by
    # passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it, and load it from here, using --tokenizer_name')

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).' )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer, plm_probability=data_args.plm_probability, max_span_length=data_args.max_span_length, )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, prediction_loss_only=True, )
# Training
if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output['eval_loss'])
        result = {'perplexity': perplexity}

        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_lm.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key in sorted(result.keys()):
                    logger.info('  %s = %s', key, str(result[key]))
                    writer.write('%s = %s\n' % (key, str(result[key])))

        results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
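# Example (sketch): what the MLM collator wired up above produces for a single
# encoded sample; the checkpoint name is illustrative.
def _collator_demo():
    tok = AutoTokenizer.from_pretrained("bert-base-uncased")
    collator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=True, mlm_probability=0.15)
    batch = collator([tok("language modeling needs masked inputs")])
    # unmasked positions carry the ignore index -100 in `labels`
    print(batch["input_ids"].shape, (batch["labels"] != -100).sum().item(), "positions masked")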
| 93 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = """left"""
    slow_tokenizer_class = XLNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs, ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
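# Example (sketch): XLNet puts its special tokens at the end of the sequence,
# matching `build_inputs_with_special_tokens` above. Requires network access to
# fetch the checkpoint named in the maps above.
if __name__ == "__main__":
    tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
    ids = tok("Hello world")["input_ids"]
    print(tok.convert_ids_to_tokens(ids))  # [..., '<sep>', '<cls>'] -- specials last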
| 690 | 0 |
'''simple docstring'''
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test for n < 3,317,044,064,679,887,385,961,981."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime.")
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and a remaining odd component,
    # essentially solving for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and n MUST be composite
        return False
    return True
def test_miller_rabin() -> None:
    """Testing a nontrivial (ends in 1, 3, 7, 9) composite and a prime in each range."""
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
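    # Example (sketch): the decomposition at the heart of miller_rabin --
    # write n - 1 = d * 2**s with d odd.
    n = 221
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    print(n, "->", d, s, d * 2**s == n - 1)  # 221 -> 55 2 True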
| 720 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """
    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 564 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
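# Example (sketch): the deferred-import pattern used above, reduced to a
# standalone toy -- nothing is imported until an exported name is first touched.
import importlib
import types

class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {n: m for m, ns in import_structure.items() for n in ns}

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(self._name_to_module[attr])  # import happens here
        return getattr(module, attr)

if __name__ == "__main__":
    lazy = _ToyLazyModule("demo", {"math": ["sqrt"], "json": ["dumps"]})
    print(lazy.sqrt(9.0))  # 3.0 -- `math` was imported only on this access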
| 546 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 546 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default(self):
'''simple docstring'''
_snake_case : Dict = self.get_tokenizer()
_snake_case : List[Any] = self.get_feature_extractor()
_snake_case : int = self.get_decoder()
_snake_case : Optional[int] = WavaVecaProcessorWithLM(tokenizer=__A, feature_extractor=__A, decoder=__A )
processor.save_pretrained(self.tmpdirname )
_snake_case : str = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer, __A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor, __A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, )
self.assertIsInstance(processor.decoder, __A )
    def test_save_load_pretrained_additional_features(self):
'''simple docstring'''
_snake_case : List[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_snake_case : List[str] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha, 5.0 )
self.assertEqual(processor.language_model.beta, 3.0 )
self.assertEqual(processor.language_model.score_boundary, -7.0 )
self.assertEqual(processor.language_model.unk_score_offset, 3 )
    def test_load_decoder_tokenizer_mismatch_content(self):
'''simple docstring'''
_snake_case : List[Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
        with self.assertRaisesRegex(ValueError, """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__A, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
    def test_feature_extractor(self):
'''simple docstring'''
_snake_case : Any = self.get_feature_extractor()
_snake_case : List[str] = self.get_tokenizer()
_snake_case : Union[str, Any] = self.get_decoder()
_snake_case : Any = WavaVecaProcessorWithLM(tokenizer=__A, feature_extractor=__A, decoder=__A )
_snake_case : Union[str, Any] = floats_list((3, 1_000) )
_snake_case : str = feature_extractor(__A, return_tensors="""np""" )
_snake_case : Optional[Any] = processor(__A, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2 )
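        # for raw audio, the processor is expected to be a thin wrapper around the feature extractor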
    def test_tokenizer(self):
'''simple docstring'''
_snake_case : str = self.get_feature_extractor()
_snake_case : List[Any] = self.get_tokenizer()
_snake_case : Dict = self.get_decoder()
_snake_case : Any = WavaVecaProcessorWithLM(tokenizer=__A, feature_extractor=__A, decoder=__A )
_snake_case : Union[str, Any] = """This is a test string"""
_snake_case : Any = processor(text=__A )
_snake_case : Tuple = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
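        # for text, the processor is expected to defer entirely to the tokenizer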
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        '''simple docstring'''
        np.random.seed(seed )
        return np.random.rand(*shape )
    def test_decoder(self):
'''simple docstring'''
_snake_case : int = self.get_feature_extractor()
_snake_case : Union[str, Any] = self.get_tokenizer()
_snake_case : Union[str, Any] = self.get_decoder()
_snake_case : List[Any] = WavaVecaProcessorWithLM(tokenizer=__A, feature_extractor=__A, decoder=__A )
_snake_case : str = self._get_dummy_logits(shape=(10, 16), seed=13 )
_snake_case : int = processor.decode(__A )
_snake_case : Tuple = decoder.decode_beams(__A )[0]
self.assertEqual(decoded_decoder[0], decoded_processor.text )
self.assertEqual("""</s> <s> </s>""", decoded_processor.text )
self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
    def test_decoder_batch(self, pool_context):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_feature_extractor()
_snake_case : int = self.get_tokenizer()
_snake_case : Any = self.get_decoder()
_snake_case : List[Any] = WavaVecaProcessorWithLM(tokenizer=__A, feature_extractor=__A, decoder=__A )
_snake_case : Optional[int] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_snake_case : Tuple = processor.batch_decode(__A )
else:
with get_context(__A ).Pool() as pool:
_snake_case : Optional[int] = processor.batch_decode(__A, __A )
_snake_case : Optional[Any] = list(__A )
with get_context("""fork""" ).Pool() as p:
_snake_case : Optional[Any] = decoder.decode_beams_batch(__A, __A )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
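        # the processor's batch_decode output must match decoding each beam by hand with pyctcdecode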
self.assertListEqual(__A, decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""], decoded_processor.text )
self.assertListEqual(__A, decoded_processor.logit_score )
self.assertListEqual(__A, decoded_processor.lm_score )
    def test_decoder_with_params(self):
'''simple docstring'''
_snake_case : Optional[int] = self.get_feature_extractor()
_snake_case : Union[str, Any] = self.get_tokenizer()
_snake_case : Optional[int] = self.get_decoder()
_snake_case : Tuple = WavaVecaProcessorWithLM(tokenizer=__A, feature_extractor=__A, decoder=__A )
_snake_case : str = self._get_dummy_logits()
_snake_case : int = 15
_snake_case : List[str] = -20.0
_snake_case : Union[str, Any] = -4.0
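        # non-default beam-search settings; batch_decode forwards these to the pyctcdecode decoder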
_snake_case : str = processor.batch_decode(
__A, beam_width=__A, beam_prune_logp=__A, token_min_logp=__A, )
_snake_case : List[Any] = decoded_processor_out.text
_snake_case : Union[str, Any] = list(__A )
with get_context("""fork""" ).Pool() as pool:
_snake_case : Union[str, Any] = decoder.decode_beams_batch(
__A, __A, beam_width=__A, beam_prune_logp=__A, token_min_logp=__A, )
_snake_case : Union[str, Any] = [d[0][0] for d in decoded_decoder_out]
_snake_case : List[Any] = [d[0][2] for d in decoded_decoder_out]
_snake_case : Dict = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__A, __A )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""], __A )
self.assertTrue(np.array_equal(__A, decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447], __A, atol=1E-3 ) )
self.assertTrue(np.array_equal(__A, decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474], __A, atol=1E-3 ) )
    def test_decoder_with_params_of_lm(self):
'''simple docstring'''
_snake_case : Any = self.get_feature_extractor()
_snake_case : Union[str, Any] = self.get_tokenizer()
_snake_case : Optional[int] = self.get_decoder()
_snake_case : str = WavaVecaProcessorWithLM(tokenizer=__A, feature_extractor=__A, decoder=__A )
_snake_case : int = self._get_dummy_logits()
_snake_case : Optional[int] = 2.0
_snake_case : str = 5.0
_snake_case : List[str] = -20.0
_snake_case : Optional[Any] = True
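        # non-default LM-fusion settings (shallow-fusion weights plus unk/boundary scores) forwarded to the decoder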
_snake_case : Optional[int] = processor.batch_decode(
__A, alpha=__A, beta=__A, unk_score_offset=__A, lm_score_boundary=__A, )
_snake_case : Union[str, Any] = decoded_processor_out.text
_snake_case : List[str] = list(__A )
decoder.reset_params(
alpha=__A, beta=__A, unk_score_offset=__A, lm_score_boundary=__A, )
with get_context("""fork""" ).Pool() as pool:
_snake_case : Optional[int] = decoder.decode_beams_batch(
__A, __A, )
_snake_case : List[str] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__A, __A )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""], __A )
_snake_case : str = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha, 2.0 )
self.assertEqual(lm_model.beta, 5.0 )
self.assertEqual(lm_model.unk_score_offset, -20.0 )
self.assertEqual(lm_model.score_boundary, __A )
    def test_decoder_download_ignores_files(self):
'''simple docstring'''
_snake_case : Any = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_snake_case : Optional[int] = processor.decoder.model_container[processor.decoder._model_key]
_snake_case : Union[str, Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_snake_case : Dict = os.listdir(__A )
_snake_case : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__A, __A )
    def test_decoder_local_files(self):
'''simple docstring'''
_snake_case : Optional[Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
_snake_case : List[Any] = WavaVecaProcessorWithLM.from_pretrained(__A )
_snake_case : Optional[int] = processor.decoder.model_container[processor.decoder._model_key]
_snake_case : Any = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_snake_case : Union[str, Any] = os.listdir(__A )
_snake_case : Tuple = os.listdir(__A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__A, __A )
    def test_processor_from_auto_processor(self):
'''simple docstring'''
_snake_case : Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_snake_case : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_snake_case : Union[str, Any] = floats_list((3, 1_000) )
_snake_case : Optional[int] = processor_wavaveca(__A, return_tensors="""np""" )
_snake_case : Optional[int] = processor_auto(__A, return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1E-2 )
_snake_case : int = self._get_dummy_logits()
_snake_case : Optional[int] = processor_wavaveca.batch_decode(__A )
_snake_case : Optional[Any] = processor_auto.batch_decode(__A )
self.assertListEqual(decoded_wavaveca.text, decoded_auto.text )
    def test_model_input_names(self):
'''simple docstring'''
_snake_case : Union[str, Any] = self.get_feature_extractor()
_snake_case : int = self.get_tokenizer()
_snake_case : Any = self.get_decoder()
_snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__A, feature_extractor=__A, decoder=__A )
self.assertListEqual(
processor.model_input_names, feature_extractor.model_input_names, msg="""`processor` and `feature_extractor` model input names do not match""", )
@staticmethod
    def get_from_offsets(offsets, key):
        '''simple docstring'''
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets_integration_fast(self):
'''simple docstring'''
_snake_case : Any = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_snake_case : List[str] = self._get_dummy_logits()[0]
_snake_case : int = processor.decode(__A, output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__A, __A ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""], """word""" ) ), outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""], """word""" ), ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""], """start_offset""" ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""], """end_offset""" ), [1, 3, 5] )
    def test_offsets_integration_fast_batch(self):
'''simple docstring'''
_snake_case : Tuple = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_snake_case : Tuple = self._get_dummy_logits()
_snake_case : Dict = processor.batch_decode(__A, output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__A, __A ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__A, """word""" ) ) for o in outputs["""word_offsets"""]], outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0], """word""" ), ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0], """start_offset""" ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0], """end_offset""" ), [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
    def test_word_time_stamp_integration(self):
'''simple docstring'''
import torch
_snake_case : Tuple = load_dataset("""common_voice""", """en""", split="""train""", streaming=__A )
_snake_case : Optional[Any] = ds.cast_column("""audio""", datasets.Audio(sampling_rate=16_000 ) )
_snake_case : int = iter(__A )
_snake_case : Union[str, Any] = next(__A )
_snake_case : Union[str, Any] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
_snake_case : str = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_snake_case : Union[str, Any] = processor(sample["""audio"""]["""array"""], return_tensors="""pt""" ).input_values
with torch.no_grad():
_snake_case : Any = model(__A ).logits.cpu().numpy()
_snake_case : Tuple = processor.decode(logits[0], output_word_offsets=__A )
_snake_case : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
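        # each logit frame covers inputs_to_logits_ratio input samples, so dividing by the
        # sampling rate converts word offsets (measured in frames) into seconds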
_snake_case : List[Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
_snake_case : List[Any] = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__A, """word""" ) ), __A )
self.assertEqual(""" """.join(self.get_from_offsets(__A, """word""" ) ), output.text )
# output times
_snake_case : List[str] = torch.tensor(self.get_from_offsets(__A, """start_time""" ) )
_snake_case : str = torch.tensor(self.get_from_offsets(__A, """end_time""" ) )
# fmt: off
_snake_case : List[Any] = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
_snake_case : Dict = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__A, __A, atol=0.01 ) )
self.assertTrue(torch.allclose(__A, __A, atol=0.01 ) )
| 717 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
A_ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
A_ = [ord(letter) for letter in string.ascii_lowercase]
A_ = {ord(char) for char in VALID_CHARS}
A_ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def UpperCAmelCase__ (snake_case__ : list[int] , snake_case__ : tuple[int, ...] ):
"""simple docstring"""
_snake_case : str = ""
_snake_case : int
_snake_case : int
_snake_case : int
for keychar, cipherchar in zip(cycle(snake_case__ ) , snake_case__ ):
_snake_case : List[str] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(snake_case__ )
return decoded
def UpperCAmelCase__ (snake_case__ : list[int] ):
"""simple docstring"""
_snake_case : list[str] = []
for key in product(snake_case__ , repeat=3 ):
_snake_case : List[Any] = try_key(snake_case__ , snake_case__ )
if encoded is not None:
possibles.append(snake_case__ )
return possibles
def UpperCAmelCase__ (snake_case__ : list[str] , snake_case__ : str ):
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def UpperCAmelCase__ (snake_case__ : str = "p059_cipher.txt" ):
"""simple docstring"""
_snake_case : list[int]
_snake_case : list[str]
_snake_case : str
_snake_case : str
_snake_case : str = Path(snake_case__ ).parent.joinpath(snake_case__ ).read_text(encoding="""utf-8""" )
_snake_case : List[Any] = [int(snake_case__ ) for number in data.strip().split(""",""" )]
_snake_case : Optional[Any] = filter_valid_chars(snake_case__ )
for common_word in COMMON_WORDS:
_snake_case : Union[str, Any] = filter_common_word(snake_case__ , snake_case__ )
if len(snake_case__ ) == 1:
break
_snake_case : Optional[int] = possibles[0]
return sum(ord(snake_case__ ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 28 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    '''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_longt5'''] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_longt5'''] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 108 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    '''simple docstring'''
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    '''simple docstring'''
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        blocks = []
        for i in range(len(self.block_out_channels ) - 1 ):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv1 )
            conv2 = nn.Conv(
                channel_out , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv2 )
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self , conditioning ):
        embedding = self.conv_in(conditioning )
        embedding = nn.silu(embedding )
        for block in self.blocks:
            embedding = block(embedding )
            embedding = nn.silu(embedding )
        embedding = self.conv_out(embedding )
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module , FlaxModelMixin , ConfigMixin):
    '''simple docstring'''
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights( self , rng ) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape , dtype=jnp.float32 )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states , controlnet_cond )["params"]
    def setup( self ):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(controlnet_block )
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
            for _ in range(self.layers_per_block ):
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , controlnet_cond , conditioning_scale = 1.0 , return_dict = True , train = False , ):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond , axis=1 )
        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        controlnet_cond = jnp.transpose(controlnet_cond , (0, 2, 3, 1) )
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond )
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlock2D ):
                sample , res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample , res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples , self.controlnet_down_blocks ):
            down_block_res_sample = controlnet_block(down_block_res_sample )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample )
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples , mid_block_res_sample=mid_block_res_sample )
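# Minimal usage sketch (illustrative only, based on the API reconstructed above):
#   model = FlaxControlNetModel()
#   params = model.init_weights(jax.random.PRNGKey(0))
#   out = model.apply({"params": params}, sample, timesteps, encoder_hidden_states, controlnet_cond)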
| 84 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_data2vec_audio'] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
    _import_structure['modeling_data2vec_text'] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
    _import_structure['modeling_data2vec_vision'] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
    _import_structure['modeling_tf_data2vec_vision'] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 719 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    for param in module.parameters():
        param.requires_grad = False
def get_device():
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device
def show_pil(img):
    fig = plt.imshow(img )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime('''%H:%M:%S''' )
    return timestamp
| 358 | 0 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
_lowerCamelCase = pd.read_csv(
'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial():
    plt.scatter(X , y , color="""red""" )
    plt.plot(X , pol_reg.predict(poly_reg.fit_transform(X ) ) , color="""blue""" )
    plt.title("""Truth or Bluff (Polynomial Regression)""" )
    plt.xlabel("""Position level""" )
    plt.ylabel("""Salary""" )
    plt.show()
if __name__ == "__main__":
    viz_polynomial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003 | 6 |
from torch import nn
def get_activation( act_fn: str ):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'''Unsupported activation function: {act_fn}''' ) | 6 | 1 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314_4598
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    '''simple docstring'''
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K" )
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
    else:
        # v_rms = sqrt(3RT / M), from kinetic theory (R in J/(mol*K), M in kg/mol)
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest
    # run doctest
    doctest.testmod()
    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 31 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
a : Tuple = None
a : Any = logging.get_logger(__name__)
a : List[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
a : str = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
a : str = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
a : Union[str, Any] = '''▁'''
class AlbertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
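# Usage note: AlbertTokenizerFast.from_pretrained("albert-base-v2") loads the fast
# tokenizer.json listed above; save_vocabulary() can only copy the sentencepiece model
# when the slow vocab file is available (see can_save_slow_tokenizer).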
| 31 | 1 |
def join(separator: str, separated: list) -> str:
    '''simple docstring'''
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 250 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
    'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_lxmert_fast'] = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lxmert'] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_lxmert'] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 139 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 218 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
            """num_train_timesteps""": 10_00,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """solver_order""": 2,
        }
        config.update(**kwargs )
        return config
def SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase : Dict=0 , **lowerCAmelCase : List[str] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = dict(self.forward_default_kwargs )
__lowerCAmelCase : List[Any] = kwargs.pop("""num_inference_steps""" , lowerCAmelCase )
__lowerCAmelCase : List[str] = self.dummy_sample
__lowerCAmelCase : List[Any] = 0.1 * sample
__lowerCAmelCase : str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase : str = self.get_scheduler_config(**lowerCAmelCase )
__lowerCAmelCase : str = scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase )
__lowerCAmelCase : Optional[int] = scheduler_class.from_pretrained(lowerCAmelCase )
new_scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residuals
__lowerCAmelCase : int = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase ,__lowerCAmelCase : Optional[int] = sample, sample
for t in range(lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
__lowerCAmelCase : Union[str, Any] = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
__lowerCAmelCase : Optional[int] = new_scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase : Optional[int]=0 , **lowerCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : int = dict(self.forward_default_kwargs )
__lowerCAmelCase : int = kwargs.pop("""num_inference_steps""" , lowerCAmelCase )
__lowerCAmelCase : int = self.dummy_sample
__lowerCAmelCase : Any = 0.1 * sample
__lowerCAmelCase : int = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase : Tuple = self.get_scheduler_config()
__lowerCAmelCase : str = scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
__lowerCAmelCase : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = scheduler_class.from_pretrained(lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
__lowerCAmelCase : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowerCAmelCase : Any = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
__lowerCAmelCase : int = new_scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase : List[Any]=None , **lowerCAmelCase : List[Any] ) -> List[str]:
"""simple docstring"""
if scheduler is None:
__lowerCAmelCase : str = self.scheduler_classes[0]
__lowerCAmelCase : Dict = self.get_scheduler_config(**lowerCAmelCase )
__lowerCAmelCase : Any = scheduler_class(**lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
__lowerCAmelCase : Optional[int] = self.get_scheduler_config(**lowerCAmelCase )
__lowerCAmelCase : str = scheduler_class(**lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = 10
__lowerCAmelCase : Any = self.dummy_model()
__lowerCAmelCase : Dict = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase : Union[str, Any] = model(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Tuple = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
return sample
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = dict(self.forward_default_kwargs )
__lowerCAmelCase : Dict = kwargs.pop("""num_inference_steps""" , lowerCAmelCase )
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase : str = self.get_scheduler_config()
__lowerCAmelCase : Any = scheduler_class(**lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = self.dummy_sample
__lowerCAmelCase : Any = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCAmelCase , """set_timesteps""" ):
scheduler.set_timesteps(lowerCAmelCase )
elif num_inference_steps is not None and not hasattr(lowerCAmelCase , """set_timesteps""" ):
__lowerCAmelCase : Tuple = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__lowerCAmelCase : int = [residual + 0.2, residual + 0.15, residual + 0.10]
__lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
__lowerCAmelCase : Any = scheduler.timesteps[5]
__lowerCAmelCase : Tuple = scheduler.timesteps[6]
__lowerCAmelCase : Dict = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
__lowerCAmelCase : str = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = DEISMultistepScheduler(**self.get_scheduler_config() )
__lowerCAmelCase : str = self.full_loop(scheduler=lowerCAmelCase )
__lowerCAmelCase : Optional[int] = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1e-3
__lowerCAmelCase : Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase : int = UniPCMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase : Optional[Any] = DEISMultistepScheduler.from_config(scheduler.config )
__lowerCAmelCase : Tuple = self.full_loop(scheduler=lowerCAmelCase )
__lowerCAmelCase : Dict = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
"""simple docstring"""
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , algorithm_type="""deis""" , solver_order=lowerCAmelCase , solver_type=lowerCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
"""simple docstring"""
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCAmelCase , solver_type=lowerCAmelCase , prediction_type=lowerCAmelCase , algorithm_type=lowerCAmelCase , )
__lowerCAmelCase : str = self.full_loop(
solver_order=lowerCAmelCase , solver_type=lowerCAmelCase , prediction_type=lowerCAmelCase , algorithm_type=lowerCAmelCase , )
assert not torch.isnan(lowerCAmelCase ).any(), "Samples have nan numbers"
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
"""simple docstring"""
self.check_over_configs(lower_order_final=lowerCAmelCase )
self.check_over_configs(lower_order_final=lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=lowerCAmelCase , time_step=0 )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Any = self.full_loop()
__lowerCAmelCase : List[str] = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : int = self.full_loop(prediction_type="""v_prediction""" )
__lowerCAmelCase : List[str] = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.scheduler_classes[0]
__lowerCAmelCase : Optional[int] = self.get_scheduler_config(thresholding=lowerCAmelCase , dynamic_thresholding_ratio=0 )
__lowerCAmelCase : Optional[int] = scheduler_class(**lowerCAmelCase )
__lowerCAmelCase : Tuple = 10
__lowerCAmelCase : int = self.dummy_model()
__lowerCAmelCase : List[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase : Optional[Any] = model(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[str] = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
        assert sample.dtype == torch.float16
| 218 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_clipseg'] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 94 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler , num_steps=10 ):
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler , num_steps=10 ):
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , "schedule.bin" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
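# unwrap_schedule records the learning rate at every step; the save/reload variant
# additionally round-trips the scheduler state_dict halfway through to check serialization.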
@require_torch
class OptimizationTest( unittest.TestCase ):
    def assertListAlmostEqual(self , list1 , list2 , tol ):
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [1_0.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 8.7_5, 7.5, 6.2_5, 5.0, 3.7_5, 2.5, 1.2_5],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 9.6_1, 8.5_3, 6.9_1, 5.0, 3.0_8, 1.4_6, 0.3_8],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 1_0.0, 8.5_3, 5.0, 1.4_6, 1_0.0, 8.5_3, 5.0, 1.4_6],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 1_0.0, 7.6_5_6, 5.6_2_5, 3.9_0_6, 2.5, 1.4_0_6, 0.6_2_5, 0.1_5_6],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 1_0.0, 8.1_6_5, 7.0_7_1, 6.3_2_5, 5.7_7_4, 5.3_4_5, 5.0, 4.7_1_4],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Wraps an lr lambda in a picklable callable."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
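# Hedged usage sketch of the helpers above (model size is illustrative): a
# warmup schedule ramps the learning rate up and then decays it, which
# unwrap_schedule makes visible as a plain list. The expected values match the
# linear-schedule row in the table inside test_schedulers.
#
#   model = nn.Linear(50, 50)
#   optimizer = AdamW(model.parameters(), lr=10.0)
#   scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
#   lrs = unwrap_schedule(scheduler, num_steps=10)  # [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]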
| 440 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self):
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self):
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self):
        return self.n_gpu > 0
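# Hedged sketch of the deprecated-flag translation performed in __init__ above
# (values are illustrative, and `cuda` being an inherited field from
# BenchmarkArguments is an assumption): a legacy `no_cuda=True` kwarg is
# rewritten to `cuda=False` with a deprecation warning.
#
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)
#   assert args.cuda is False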
| 379 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
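# Hedged usage sketch (assumes the `datasets` library; the ClassLabel names are
# illustrative): aligning the template with a concrete dataset schema copies
# the dataset's ClassLabel into the template's label_schema.
#
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification(text_column="text", label_column="labels")
#   aligned = task.align_with_features(features)
#   # aligned.label_schema["labels"] is now the two-class ClassLabel above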
| 379 | 1 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = R'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")
class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done
class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
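# Hedged usage sketch (tensor shapes are illustrative): criteria compose via
# StoppingCriteriaList, and generation stops once any member returns True.
#
#   criteria = StoppingCriteriaList([
#       MaxLengthCriteria(max_length=20),
#       MaxTimeCriteria(max_time=5.0),  # wall-clock budget in seconds
#   ])
#   input_ids = torch.ones((1, 20), dtype=torch.long)
#   scores = torch.zeros((1, 100))
#   assert criteria(input_ids, scores)  # True: length 20 >= max_length 20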
| 279 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})')

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.')

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.')

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
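# Hedged usage sketch (optimizer and step counts are illustrative): the string
# name is coerced to SchedulerType and dispatched through the table above.
#
#   import torch
#   opt = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
#   sched = get_scheduler("cosine", optimizer=opt, num_warmup_steps=100, num_training_steps=1000)
#   for _ in range(1000):
#       opt.step()
#       sched.step()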
| 279 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar('T')
U = TypeVar('U')
class DoubleLinkedListNode(Generic[T, U]):
    """Double Linked List Node built specifically for LRU Cache."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__( self ):
return (
F"""Node: key: {self.key}, val: {self.val}, """
F"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class DoubleLinkedList(Generic[T, U]):
    """Double Linked List built specifically for LRU Cache."""

    def __init__(self):
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self):
        rep = ['DoubleLinkedList']
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)
    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear
    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    """LRU Cache to store a given capacity of key/value pairs."""

    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ):
return (
F"""CacheInfo(hits={self.hits}, misses={self.miss}, """
F"""capacity={self.capacity}, current size={self.num_keys})"""
)
    def __contains__(self, key: T) -> bool:
        return key in self.cache
    def get(self, key: T) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None
    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)
    @classmethod
    def decorator(cls, size: int = 128):
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, 'cache_info', cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
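# Hedged usage sketch of the class-level decorator above (function and counts
# are illustrative):
#
#   @LRUCache.decorator(100)
#   def fib(n: int) -> int:
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   fib(30)
#   print(fib.cache_info())  # e.g. CacheInfo(hits=28, misses=31, capacity=100, current size=31)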
| 714 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
| 623 | 0 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    res = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, res).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 424 | import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 424 | 1 |
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: is a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open('path_to_image').convert('L'))
    image.save('output_image_path')
| 714 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 380 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, '''schedulers/'''))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, '''src/diffusers/schedulers/scheduling_ddpm.py'''),
            os.path.join(self.diffusers_dir, '''schedulers/scheduling_ddpm.py'''),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = '''src/diffusers'''
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
        if overwrite_result is not None:
            expected = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, '''new_code.py''')
        with open(fname, '''w''', newline='''\n''') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, '''r''') as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''')
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''',
            '''DDPMSchedulerOutput''',
            REFERENCE_CODE + '''\n''',
        )

        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''',
            '''DDPMSchedulerOutput''',
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''',
            '''TestSchedulerOutput''',
            re.sub('''DDPM''', '''Test''', REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}',
            F'{long_class_name}SchedulerOutput',
            re.sub('''Bert''', long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''',
            '''TestSchedulerOutput''',
            REFERENCE_CODE,
            overwrite_result=re.sub('''DDPM''', '''Test''', REFERENCE_CODE),
        )
| 22 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 268 | 0 |
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')

    var_map = (
        ('layer.', 'layer_'),
        ('word_embeddings.weight', 'word_embeddings'),
        ('position_embeddings.weight', 'position_embeddings'),
        ('token_type_embeddings.weight', 'token_type_embeddings'),
        ('.', '/'),
        ('LayerNorm/weight', 'LayerNorm/gamma'),
        ('LayerNorm/bias', 'LayerNorm/beta'),
        ('weight', 'kernel'),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return F'''bert/{name}'''

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(F'''Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}''')

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace('-', '_') + '.ckpt'))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, required=True, help='model name e.g. bert-base-uncased')
    parser.add_argument(
        '--cache_dir', type=str, default=None, required=False, help='Directory containing pytorch model')
    parser.add_argument('--pytorch_model_path', type=str, required=True, help='/path/to/<pytorch-model-name>.bin')
    parser.add_argument('--tf_cache_dir', type=str, required=True, help='Directory in which to save tensorflow model')
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
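# Hedged CLI sketch (the script filename and paths are placeholders, not from
# the source; dispatch is via `if __name__ == "__main__": main()`):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path /path/to/pytorch_model.bin \
#       --tf_cache_dir /path/to/tf_ckpt_dir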
| 709 | """simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
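# Hedged worked example (values are illustrative): a 4x4 input with size=2,
# stride=2 yields a 2x2 output; each cell is the max (or truncated average)
# of one non-overlapping 2x2 window.
#
#   arr = np.arange(1, 17).reshape(4, 4)   # [[1..4], [5..8], [9..12], [13..16]]
#   maxpooling(arr, size=2, stride=2)      # [[ 6.,  8.], [14., 16.]]
#   avgpooling(arr, size=2, stride=2)      # [[ 3.,  5.], [11., 13.]]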
| 536 | 0 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 50_00) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f"""{solution() = }""")
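# Hedged derivation note for is_pentagonal above: pentagonal numbers satisfy
# P(k) = k(3k - 1) / 2, so solving 3k^2 - k - 2n = 0 for k gives
# k = (1 + sqrt(1 + 24n)) / 6; n is pentagonal exactly when that k is a
# positive integer. Quick check (illustrative): P(3) = 12, and
# (1 + sqrt(1 + 24 * 12)) / 6 = (1 + 17) / 6 = 3.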
| 683 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 683 | 1 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
def __a ( self ) -> int:
self.enable_attention_slicing(_a )
def __a ( self ) -> List[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCAmelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __a ( self ) -> str:
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _a , _a , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ) -> Union[str, Any]:
lowerCAmelCase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowerCAmelCase_ = self.segmentation_model(**_a )
lowerCAmelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowerCAmelCase_ = self.numpy_to_pil(_a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowerCAmelCase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_a , image=_a , mask_image=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , )
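
# Illustrative usage of the pipeline above, shown as a sketch only: neither the
# checkpoint id nor the `custom_pipeline` identifier comes from this file, so treat
# both as assumptions.
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting", custom_pipeline="text_inpainting"
#   )
#   result = pipe(prompt="a red couch", image=init_image, text="the old sofa").images[0]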
| 719 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
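
# A compact illustration of the API the tests above exercise (an added sketch, not part
# of the original test file; "gpt2" is the same checkpoint the tests use):
#
#   generation_config = GenerationConfig.from_model_config(AutoConfig.from_pretrained("gpt2"))
#   unused = generation_config.update(max_new_tokens=64, foo="bar")
#   assert unused == {"foo": "bar"}  # unknown keys are returned, valid ones are applied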
| 226 | 0 |
def longest_common_subsequence(x: str, y: str):
    """
    Find the longest common subsequence of two strings.

    >>> longest_common_subsequence("programming", "gaming")
    (6, 'gaming')
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)

    import doctest

    doctest.testmod()
| 171 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """
    Calculate real power from apparent power and power factor.

    >>> real_power(100, 0.9)
    90.0
    >>> real_power(0, 0.8)
    0.0
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """
    Calculate reactive power from apparent power and power factor.

    >>> reactive_power(0, 0.8)
    0.0
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
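
# A quick numeric check of the helpers above. This block is an illustrative addition,
# not part of the original module: a 100 VA load at power factor 0.6 splits into
# 60 W of real power and 80 var of reactive power.
if __name__ == "__main__":
    assert math.isclose(real_power(100, 0.6), 60.0)
    assert math.isclose(reactive_power(100, 0.6), 80.0)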
| 262 | 0 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
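
# Illustrative use of the pipeline above through the `pipeline` factory. This is an
# added sketch, not part of the original module; the checkpoint id is an assumption.
#
#   from transformers import pipeline
#
#   generator = pipeline("mask-generation", model="facebook/sam-vit-base")
#   outputs = generator("path/to/image.png", points_per_batch=64)
#   masks, scores = outputs["masks"], outputs["scores"]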
| 700 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 1_024,
'facebook/mbart-large-cc25': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix; suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting. No prefix; suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
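
# Illustrative round trip with the tokenizer above (an added sketch, not part of the
# original module; the checkpoint id comes from the pretrained map defined earlier):
#
#   tokenizer = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro")
#   tokenizer.src_lang = "en_XX"
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # the sequence now ends with [eos, src_lang_code], per set_src_lang_special_tokens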
| 395 | 0 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 61 |
import copy
import tempfile
import unittest

from transformers import M2M100Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import M2M100ForConditionalGeneration, M2M100Model, M2M100Tokenizer
    from transformers.models.m2m_100.modeling_m2m_100 import M2M100Decoder, M2M100Encoder


def prepare_m2m_100_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


class M2M100ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_m2m_100_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return M2M100Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = M2M100Model(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = M2M100Model(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = M2M100Encoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = M2M100Decoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)


@require_torch
class M2M100ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            M2M100Model,
            M2M100ForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (M2M100ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": M2M100ForConditionalGeneration,
            "feature-extraction": M2M100Model,
            "summarization": M2M100ForConditionalGeneration,
            "text2text-generation": M2M100ForConditionalGeneration,
            "translation": M2M100ForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = M2M100ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=M2M100Config)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (M2M100Model, M2M100ForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = M2M100ForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)


def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class M2M100ModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = M2M100Model.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
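
# Minimal generation sketch mirroring the integration test above (an added example;
# the checkpoint and language codes are the ones the test itself uses):
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
#   model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#   batch = tokenizer("La vie est belle.", return_tensors="pt")
#   generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("en"))
#   print(tokenizer.batch_decode(generated, skip_special_tokens=True))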
| 606 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["labels"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
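
# Illustrative call pattern for the processor above (an added sketch, not from the
# original file; the checkpoint id is an assumption):
#
#   from transformers import Pix2StructProcessor
#
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#   inputs = processor(images=image, text="a caption", max_patches=1024, return_tensors="pt")
#   # image inputs become patch features; paired text is renamed to decoder `labels`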
| 721 |
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """
    Calculate the distance between the two endpoints of two vectors.

    >>> euclidean_distance([0, 0], [2, 2])
    2.8284271247461903
    """
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """
    Calculate the distance between the two endpoints of two vectors without numpy.

    >>> euclidean_distance_no_np([0, 0], [2, 2])
    2.8284271247461903
    """
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(timeit("euclidean_distance_no_np([1, 2, 3], [4, 5, 6])", number=10_000, globals=globals()))
        print("With Numpy")
        print(timeit("euclidean_distance([1, 2, 3], [4, 5, 6])", number=10_000, globals=globals()))

    benchmark()
| 352 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
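
# A small sketch of how the two classes above fit together (an added example, not part
# of the original module; the tiny sizes are arbitrary):
#
#   config = GPTJConfig(n_layer=4, n_head=8, n_embd=512)
#   onnx_config = GPTJOnnxConfig(config, task="default", use_past=True)
#   print(onnx_config.inputs)  # input_ids, past_key_values.*, attention_mask axes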
| 40 |
"""
Project Euler Problem 74: count how many starting numbers below one million produce a
digit-factorial chain with exactly sixty non-repeating terms.
"""
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of `number`."""
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    """Return the number of chains with exactly `chain_length` non-repeating elements."""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
| 378 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 119 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def UpperCAmelCase_ ( __lowercase : List[str] ) -> int:
'''simple docstring'''
_UpperCAmelCase = SwinvaConfig()
_UpperCAmelCase = swinva_name.split("_" )
_UpperCAmelCase = name_split[1]
if "to" in name_split[3]:
_UpperCAmelCase = int(name_split[3][-3:] )
else:
_UpperCAmelCase = int(name_split[3] )
if "to" in name_split[2]:
_UpperCAmelCase = int(name_split[2][-2:] )
else:
_UpperCAmelCase = int(name_split[2][6:] )
if model_size == "tiny":
_UpperCAmelCase = 96
_UpperCAmelCase = (2, 2, 6, 2)
_UpperCAmelCase = (3, 6, 12, 24)
elif model_size == "small":
_UpperCAmelCase = 96
_UpperCAmelCase = (2, 2, 18, 2)
_UpperCAmelCase = (3, 6, 12, 24)
elif model_size == "base":
_UpperCAmelCase = 128
_UpperCAmelCase = (2, 2, 18, 2)
_UpperCAmelCase = (4, 8, 16, 32)
else:
_UpperCAmelCase = 192
_UpperCAmelCase = (2, 2, 18, 2)
_UpperCAmelCase = (6, 12, 24, 48)
if "to" in swinva_name:
_UpperCAmelCase = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
_UpperCAmelCase = 2_1841
_UpperCAmelCase = "huggingface/label-files"
_UpperCAmelCase = "imagenet-22k-id2label.json"
_UpperCAmelCase = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase = {int(__lowercase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
else:
_UpperCAmelCase = 1000
_UpperCAmelCase = "huggingface/label-files"
_UpperCAmelCase = "imagenet-1k-id2label.json"
_UpperCAmelCase = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase = {int(__lowercase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
_UpperCAmelCase = img_size
_UpperCAmelCase = num_classes
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
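# Hedged spot-check for rename_key (the sample timm key is illustrative): the
# "layers" rule first adds the "encoder." prefix, "attn.proj" then becomes
# "attention.output.dense", and every non-head key finally gains "swinv2.".
def _rename_key_example():
    renamed = rename_key("layers.0.blocks.0.attn.proj.weight")
    assert renamed == "swinv2.encoder.layers.0.blocks.0.attention.output.dense.weight"
    return renamed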
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            # The fused timm qkv matrix is split into separate query/key/value
            # slices; the target key names follow the HF Swinv2 module layout.
            prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    # Sanity check: the converted model must reproduce the timm logits.
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
| 119 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = "data2vec-vision"
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict=768 , SCREAMING_SNAKE_CASE__ : Optional[int]=12 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : Any=3_072 , SCREAMING_SNAKE_CASE__ : List[str]="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : str=0.0 , SCREAMING_SNAKE_CASE__ : List[str]=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-1_2 , SCREAMING_SNAKE_CASE__ : Tuple=224 , SCREAMING_SNAKE_CASE__ : Optional[Any]=16 , SCREAMING_SNAKE_CASE__ : Any=3 , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=[3, 5, 7, 11] , SCREAMING_SNAKE_CASE__ : Optional[int]=[1, 2, 3, 6] , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[int]=0.4 , SCREAMING_SNAKE_CASE__ : Dict=256 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=255 , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = use_mask_token
lowerCAmelCase__ = use_absolute_position_embeddings
lowerCAmelCase__ = use_relative_position_bias
lowerCAmelCase__ = use_shared_relative_position_bias
lowerCAmelCase__ = layer_scale_init_value
lowerCAmelCase__ = drop_path_rate
lowerCAmelCase__ = use_mean_pooling
# decode head attributes (semantic segmentation)
lowerCAmelCase__ = out_indices
lowerCAmelCase__ = pool_scales
# auxiliary head attributes (semantic segmentation)
lowerCAmelCase__ = use_auxiliary_head
lowerCAmelCase__ = auxiliary_loss_weight
lowerCAmelCase__ = auxiliary_channels
lowerCAmelCase__ = auxiliary_num_convs
lowerCAmelCase__ = auxiliary_concat_input
lowerCAmelCase__ = semantic_loss_ignore_index
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = version.parse("1.11" )
@property
def a ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def a ( self : Optional[Any] ) -> float:
return 1e-4
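# Hedged usage sketch (assuming the transformers names Data2VecVisionConfig and
# Data2VecVisionOnnxConfig for the two classes above):
# config = Data2VecVisionConfig()
# assert config.hidden_size == 768 and config.patch_size == 16  # defaults above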
| 61 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


# The decode formats supported by MgpstrProcessor._decode_helper below.
SUPPORTED_DECODE_TYPES = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            # Keep whichever of the three heads is most confident for this sample.
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            # Sequence confidence is the product of per-step max probabilities.
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
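
# Hedged usage sketch (the checkpoint name and variable names are illustrative):
# processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
# pixel_values = processor(images=image, return_tensors="pt").pixel_values
# text = processor.batch_decode(model(pixel_values).logits)["generated_text"]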
| 28 | 0 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _UpperCamelCase :
def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=3_2 , __UpperCamelCase=1_6 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=3_2 , __UpperCamelCase=4 , __UpperCamelCase=[0, 1, 2, 3] , __UpperCamelCase=4 , __UpperCamelCase=3_7 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=[1, 3_8_4, 2_4, 2_4] , __UpperCamelCase=True , __UpperCamelCase=None , )-> Union[str, Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = backbone_out_indices
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = backbone_featmap_shape
__lowerCAmelCase = scope
__lowerCAmelCase = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase = (image_size // patch_size) ** 2
__lowerCAmelCase = num_patches + 1
def __UpperCAmelCase ( self )-> Optional[int]:
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self )-> Union[str, Any]:
__lowerCAmelCase = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [9_6, 1_9_2, 3_8_4, 7_6_8],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Any:
__lowerCAmelCase = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__lowerCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__lowerCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Tuple:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__lowerCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCAmelCase ( self )-> Dict:
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase (a_ , a_ , unittest.TestCase ):
snake_case_ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
snake_case_ = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __UpperCAmelCase ( self )-> List[str]:
__lowerCAmelCase = DPTModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=3_7 )
def __UpperCAmelCase ( self )-> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def __UpperCAmelCase ( self )-> str:
pass
def __UpperCAmelCase ( self )-> str:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __UpperCAmelCase ( self )-> Optional[Any]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__UpperCamelCase )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __UpperCAmelCase ( self )-> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __UpperCAmelCase ( self )-> List[str]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def __UpperCAmelCase ( self )-> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def __UpperCAmelCase ( self )-> List[str]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
if model_class in get_values(__UpperCamelCase ):
continue
__lowerCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
__lowerCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__lowerCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def __UpperCAmelCase ( self )-> Optional[int]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = False
__lowerCAmelCase = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
__lowerCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
__lowerCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__lowerCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def __UpperCAmelCase ( self )-> List[str]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
__lowerCAmelCase = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__lowerCAmelCase = [F"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCAmelCase ( self )-> int:
pass
@slow
def __UpperCAmelCase ( self )-> Tuple:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__lowerCAmelCase = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __UpperCAmelCase ( self )-> Union[str, Any]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = "add"
with self.assertRaises(__UpperCamelCase ):
__lowerCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
def __lowerCAmelCase ( ):
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class _UpperCamelCase (unittest.TestCase ):
def __UpperCAmelCase ( self )-> int:
__lowerCAmelCase = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
__lowerCAmelCase = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(__UpperCamelCase )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**__UpperCamelCase )
__lowerCAmelCase = outputs.predicted_depth
# verify the predicted depth
__lowerCAmelCase = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
__lowerCAmelCase = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , __UpperCamelCase , atol=1e-4 ) )
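# Note on the integration test above: DPT-hybrid (Intel/dpt-hybrid-midas) emits
# an unnormalised MiDaS-style depth map of shape (1, 384, 384), and the expected
# 3x3 slice is compared after scaling by 1/100 with atol=1e-4.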
| 709 |
def pancake_sort(arr):
    """Sort by repeatedly flipping the largest remaining element to the front
    and then flipping it into its final position."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix, sending the maximum to position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
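# Worked example of the double flip above:
#     pancake_sort([3, 1, 2])
# pass 1: max 3 is already at index 0, so the prefix flip is a no-op and the
#         cur-length flip moves it to the end -> [2, 1, 3]
# pass 2: max 2 flipped to front (no-op), then flipped into place -> [1, 2, 3]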
| 290 | 0 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure a freshly initialised qubit; all 1000 shots should read "0"."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
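
# Hedged variation (the helper name is hypothetical; the qiskit calls mirror
# the ones above): measuring after a Hadamard gate splits the shots roughly
# 50/50 between "0" and "1" instead of always reading "0".
def superposition_measure(qubits: int = 1, classical_bits: int = 1) -> qiskit.result.counts.Counts:
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    circuit.h(0)  # put qubit 0 into an equal superposition of |0> and |1>
    circuit.measure([0], [0])
    simulator = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)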
if __name__ == "__main__":
print(f'Total count for various states are: {single_qubit_measure(1, 1)}')
| 59 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n], the filter's output, for input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()
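
# Hedged usage sketch (the IIRFilter import path follows TheAlgorithms repo
# layout and may differ in other projects):
# from audio_filters.iir_filter import IIRFilter
# show_frequency_response(IIRFilter(4), samplerate=48000)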
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show() | 557 | 0 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch construction `steps` times to the given polyline."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each segment with the four segments of the Koch motif."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    # Avoid stretched display of the fractal.
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
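    # Growth check for the construction above: each iteration_step replaces
    # every segment with four, so 5 iterations on the triangle's 3 segments
    # give 3 * 4**5 = 3072 segments, i.e. len(processed_vectors) == 3073.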
| 583 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 583 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 31 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
lowerCamelCase__ : Optional[Any] = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
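    # Illustration of the two builders above: for a pair (A, B) the layout is
    # [CLS] A [SEP] B [SEP]; build_inputs_with_special_tokens returns the
    # concatenated ids, and this method returns 0s over "[CLS] A [SEP]" and 1s
    # over "B [SEP]".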
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,) | 31 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
lowercase = LongformerTokenizer
lowercase = True
lowercase = LongformerTokenizerFast
lowercase = True
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCamelCase = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
UpperCamelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCamelCase = {"unk_token": "<unk>"}
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = "lower newer"
UpperCamelCase = "lower newer"
return input_text, output_text
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase = "lower newer"
UpperCamelCase = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
UpperCamelCase = tokenizer.tokenize(SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase = tokens + [tokenizer.unk_token]
UpperCamelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=SCREAMING_SNAKE_CASE ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=SCREAMING_SNAKE_CASE ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
UpperCamelCase = tokenizer.encode("sequence builders" , add_special_tokens=SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.encode("multi-sequence build" , add_special_tokens=SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.encode(
"sequence builders" , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = "Encode this sequence."
UpperCamelCase = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
UpperCamelCase = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
UpperCamelCase = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
UpperCamelCase = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE )} ) # mask token has a left space
UpperCamelCase = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
UpperCamelCase = "Encode <mask> sequence"
UpperCamelCase = "Encode <mask>sequence"
UpperCamelCase = tokenizer.encode(SCREAMING_SNAKE_CASE )
UpperCamelCase = encoded.index(SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.encode(SCREAMING_SNAKE_CASE )
UpperCamelCase = encoded.index(SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
pass
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
UpperCamelCase = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
UpperCamelCase = "A, <mask> AllenNLP sentence."
UpperCamelCase = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE , trim_offsets=SCREAMING_SNAKE_CASE )
UpperCamelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
UpperCamelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["add_prefix_space"] , SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state["trim_offsets"] , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase = f'''{text_of_1_token} {text_of_1_token}'''
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE , trim_offsets=SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer_r(SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE ) + 1, len(SCREAMING_SNAKE_CASE ) + 1 + len(SCREAMING_SNAKE_CASE )) , )
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE , trim_offsets=SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer_r(SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE ) + 1, len(SCREAMING_SNAKE_CASE ) + 1 + len(SCREAMING_SNAKE_CASE )) , )
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE , trim_offsets=SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer_r(SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE ), len(SCREAMING_SNAKE_CASE ) + 1 + len(SCREAMING_SNAKE_CASE )) , )
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE , trim_offsets=SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer_r(SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE ), len(SCREAMING_SNAKE_CASE ) + 1 + len(SCREAMING_SNAKE_CASE )) , )
UpperCamelCase = f''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE , trim_offsets=SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer_r(SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE ) + 1, 1 + len(SCREAMING_SNAKE_CASE ) + 1 + len(SCREAMING_SNAKE_CASE )) , )
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE , trim_offsets=SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer_r(SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE ), 1 + len(SCREAMING_SNAKE_CASE ) + 1 + len(SCREAMING_SNAKE_CASE )) , )
UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE , trim_offsets=SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer_r(SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE ), 1 + len(SCREAMING_SNAKE_CASE ) + 1 + len(SCREAMING_SNAKE_CASE )) , )
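# Note on the byte-level BPE convention exercised above: "\u0120" encodes a
# leading space, marking word-initial tokens such as "\u0120lowest"; with
# add_prefix_space=True a space is inserted before the first word, which is why
# the expected offset mappings shift by one in the prefix-space cases.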
| 414 |
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths in O(v^3): dist starts as the adjacency matrix
    and every pair (i, j) is relaxed through each intermediate vertex k."""
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
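

# Hedged self-check (the helper name is hypothetical): relaxing 0 -> 1 -> 2
# (cost 3 + 4 = 7) should beat the direct 0 -> 2 edge of weight 10.
def _floyd_warshall_example():
    inf = float("inf")
    graph = [[0, 3, 10], [inf, 0, 4], [inf, inf, 0]]
    dist, _ = floyd_warshall(graph, 3)
    assert dist[0][2] == 7
    return dist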
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 414 | 1 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = '''segformer'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : List[Any]=[2, 2, 2, 2] , UpperCAmelCase_ : Optional[Any]=[8, 4, 2, 1] , UpperCAmelCase_ : Dict=[32, 64, 160, 256] , UpperCAmelCase_ : Any=[7, 3, 3, 3] , UpperCAmelCase_ : Any=[4, 2, 2, 2] , UpperCAmelCase_ : Optional[Any]=[1, 2, 5, 8] , UpperCAmelCase_ : Optional[Any]=[4, 4, 4, 4] , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=1e-6 , UpperCAmelCase_ : str=256 , UpperCAmelCase_ : str=255 , **UpperCAmelCase_ : Union[str, Any] , ):
super().__init__(**UpperCAmelCase_)
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , UpperCAmelCase_ , )
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : str = num_encoder_blocks
UpperCamelCase__ : Any = depths
UpperCamelCase__ : List[Any] = sr_ratios
UpperCamelCase__ : Optional[Any] = hidden_sizes
UpperCamelCase__ : int = patch_sizes
UpperCamelCase__ : int = strides
UpperCamelCase__ : Dict = mlp_ratios
UpperCamelCase__ : Optional[Any] = num_attention_heads
UpperCamelCase__ : int = hidden_act
UpperCamelCase__ : Dict = hidden_dropout_prob
UpperCamelCase__ : List[str] = attention_probs_dropout_prob
UpperCamelCase__ : Optional[Any] = classifier_dropout_prob
UpperCamelCase__ : Optional[Any] = initializer_range
UpperCamelCase__ : str = drop_path_rate
UpperCamelCase__ : Union[str, Any] = layer_norm_eps
UpperCamelCase__ : Optional[Any] = decoder_hidden_size
UpperCamelCase__ : Optional[Any] = kwargs.get('reshape_last_stage' , UpperCAmelCase_)
UpperCamelCase__ : Union[str, Any] = semantic_loss_ignore_index
class __lowercase (__lowerCamelCase ):
_lowerCamelCase = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self : List[Any]):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __UpperCamelCase ( self : Optional[Any]):
return 1e-4
@property
def __UpperCamelCase ( self : Optional[int]):
return 12
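# Note on the export config above: pixel_values is declared with dynamic
# batch/num_channels/height/width axes, the validation tolerance is 1e-4, the
# default ONNX opset is 12, and the default encoder hyperparameters match
# SegFormer-B0 (hidden_sizes [32, 64, 160, 256], depths [2, 2, 2, 2]).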
| 596 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase ):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase ):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self ):
        self.model_tester = FlaxAlbertModelTester(self)
    @slow
    def test_model_from_pretrained(self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('albert-base-v2')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding(self ):
        model = FlaxAlbertModel.from_pretrained('albert-base-v2')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 596 | 1 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs ):
            pass
@is_pipeline_test
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@require_torch
    def test_small_model_pt(self ):
"""simple docstring"""
_lowerCAmelCase = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
_lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_lowerCAmelCase = image_classifier(_lowercase , candidate_labels=["""a""", """b""", """c"""] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_lowercase ) , [
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
] , )
_lowerCAmelCase = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
] , )
@require_tf
    def test_small_model_tf(self ):
"""simple docstring"""
_lowerCAmelCase = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
_lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_lowerCAmelCase = image_classifier(_lowercase , candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(_lowercase ) , [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}] , )
_lowerCAmelCase = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
[
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
{"""score""": 0.333, """label""": ANY(_lowercase )},
],
] , )
@slow
@require_torch
    def test_large_model_pt(self ):
"""simple docstring"""
_lowerCAmelCase = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
# This is an image of 2 cats with remotes and no planes
_lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_lowerCAmelCase = image_classifier(_lowercase , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
_lowerCAmelCase = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
    def test_large_model_tf(self ):
"""simple docstring"""
_lowerCAmelCase = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
_lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_lowerCAmelCase = image_classifier(_lowercase , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
_lowerCAmelCase = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
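# Minimal usage sketch of the pipeline exercised by the tests above, outside
# the test harness. The tiny checkpoint and fixture path are taken from the
# tests themselves; PIL and a local copy of the fixture image are assumed.
from PIL import Image
from transformers import pipeline

classifier = pipeline(
    task="zero-shot-image-classification",
    model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# Each candidate label is scored against the image with a CLIP-style dual encoder.
for prediction in classifier(image, candidate_labels=["cat", "plane", "remote"]):
    print(prediction["label"], round(prediction["score"], 3))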
| 162 |
'''simple docstring'''
from torch import nn
class ClassificationHead(nn.Module ):
    """A single-layer classification head over transformer embeddings."""
    def __init__(self, class_size, embed_size ):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size )
    def forward(self, hidden_state ):
        logits = self.mlp(hidden_state )
        return logits
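# Usage sketch for the head above: map a batch of transformer hidden states to
# class logits. The sizes are illustrative.
import torch

head = ClassificationHead(class_size=5, embed_size=768)
hidden_state = torch.randn(4, 768)  # e.g. one pooled vector per sequence
logits = head(hidden_state)         # nn.Module.__call__ dispatches to forward()
print(logits.shape)                 # torch.Size([4, 5])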
| 162 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    '''configuration_m2m_100''': ['''M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''M2M100Config''', '''M2M100OnnxConfig'''],
    '''tokenization_m2m_100''': ['''M2M100Tokenizer'''],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_m2m_100'''] = [
        '''M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''M2M100ForConditionalGeneration''',
        '''M2M100Model''',
        '''M2M100PreTrainedModel''',
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
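# What the lazy init above buys: importing the package is cheap, and the heavy
# torch-backed modules are only loaded when a symbol is first touched. A small
# sketch, assuming a working transformers installation:
from transformers import M2M100Config

config = M2M100Config()   # attribute access resolves the real module lazily
print(config.model_type)  # 'm2m_100'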
| 445 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase ):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.0_2, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self ) -> None:
        self.model_tester = FlaxRobertaModelTester(self )
    @slow
    def test_model_from_pretrained(self ) -> None:
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('roberta-base', from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 368 | 0 |
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 708 |
'''simple docstring'''
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None ) -> str:
    rng = np.random.default_rng(seed=seed )
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits )
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits )
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits )
    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name='BB84' )
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis ):
        if alice_state[index] == 1:
            bb84_circ.x(index )
        if alice_basis[index] == 1:
            bb84_circ.h(index )
    bb84_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis ):
        if bob_basis[index] == 1:
            bb84_circ.h(index )
    bb84_circ.barrier()
    bb84_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend('aer_simulator' )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed )
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = ''.join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result )
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key ) >= key_len else gen_key.ljust(key_len, '0' )
    return key
if __name__ == "__main__":
    print(f'''The generated key is : {bb84(8, seed=0)}''')
    from doctest import testmod
    testmod()
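# Usage sketch: with a fixed seed the simulation is deterministic, so running
# the protocol twice stands in for Alice and Bob deriving the same key.
# Requires qiskit with the Aer simulator installed.
alice_key = bb84(key_len=8, seed=0)
bob_key = bb84(key_len=8, seed=0)
assert alice_key == bob_key and len(alice_key) == 8
print(f"shared key: {alice_key}")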
| 532 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt2': 10_24,
'gpt2-medium': 10_24,
'gpt2-large': 10_24,
'gpt2-xl': 10_24,
'distilgpt2': 10_24,
}
class GPTaTokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = GPTaTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        self.add_bos_token = kwargs.pop('''add_bos_token''', False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''', add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''', False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs )
    def _encode_plus(self, *args, **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''', False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids(self, conversation: "Conversation" ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
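# Usage sketch for the fast tokenizer above (it mirrors GPT2TokenizerFast);
# from_pretrained downloads the gpt2 vocab/merges/tokenizer files on first use.
tokenizer = GPTaTokenizerFast.from_pretrained("gpt2")
encoded = tokenizer("Hello world")
print(encoded["input_ids"])                    # e.g. [15496, 995]
print(tokenizer.decode(encoded["input_ids"]))  # 'Hello world'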
| 98 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model ) -> None:
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict(), pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
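# Example invocation of the conversion above; the script filename and all
# paths below are placeholders, not values taken from this file.
#
#   python convert_funnel_tf_checkpoint.py \
#       --tf_checkpoint_path /tmp/funnel/model.ckpt \
#       --config_file /tmp/funnel/config.json \
#       --pytorch_dump_path /tmp/funnel/pytorch_model.bin
#
# The function can also be called directly:
#
#   convert_tf_checkpoint_to_pytorch(
#       "/tmp/funnel/model.ckpt", "/tmp/funnel/config.json",
#       "/tmp/funnel/pytorch_model.bin", base_model=False,
#   )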
| 326 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
MAX_MODEL_INPUT_SIZES = {
'facebook/s2t-small-librispeech-asr': 1024,
}
MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
LANGUAGES = {'mustc': MUSTC_LANGS}
class SpeechaTextTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : str="<s>" , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : str="<pad>" , __lowerCamelCase : Any="<unk>" , __lowerCamelCase : str=False , __lowerCamelCase : List[str]=False , __lowerCamelCase : List[str]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : str = None , **__lowerCamelCase : Dict , ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , do_upper_case=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , tgt_lang=UpperCamelCase__ , lang_codes=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
lowerCamelCase__ : int = do_upper_case
lowerCamelCase__ : List[str] = do_lower_case
lowerCamelCase__ : List[Any] = load_json(UpperCamelCase__ )
lowerCamelCase__ : str = {v: k for k, v in self.encoder.items()}
lowerCamelCase__ : Union[str, Any] = spm_file
lowerCamelCase__ : int = load_spm(UpperCamelCase__ , self.sp_model_kwargs )
if lang_codes is not None:
lowerCamelCase__ : Tuple = lang_codes
lowerCamelCase__ : Any = LANGUAGES[lang_codes]
lowerCamelCase__ : Any = [f"<lang:{lang}>" for lang in self.langs]
lowerCamelCase__ : Optional[int] = {lang: self.sp_model.PieceToId(f"<lang:{lang}>" ) for lang in self.langs}
lowerCamelCase__ : Any = self.lang_tokens
lowerCamelCase__ : str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
lowerCamelCase__ : str = {}
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.encoder )
@property
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return self._tgt_lang
@tgt_lang.setter
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : str ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = new_tgt_lang
self.set_tgt_lang_special_tokens(UpperCamelCase__ )
def lowerCAmelCase ( self : str , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.lang_code_to_id[tgt_lang]
lowerCamelCase__ : Optional[Any] = [lang_code_id]
def lowerCAmelCase ( self : Dict , __lowerCamelCase : int ):
'''simple docstring'''
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def lowerCAmelCase ( self : Any , __lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.encoder.get(UpperCamelCase__ , self.encoder[self.unk_token] )
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.decoder.get(UpperCamelCase__ , self.unk_token )
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = []
lowerCamelCase__ : Optional[int] = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
lowerCamelCase__ : Any = self.sp_model.decode(UpperCamelCase__ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
lowerCamelCase__ : List[Any] = []
else:
current_sub_tokens.append(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = self.sp_model.decode(UpperCamelCase__ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Dict=None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str = None , __lowerCamelCase : Union[str, Any] = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = [1] * len(self.prefix_tokens )
lowerCamelCase__ : Optional[Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCamelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCamelCase__ )) + ([0] * len(UpperCamelCase__ )) + suffix_ones
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : int ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.__dict__.copy()
lowerCamelCase__ : Any = None
return state
def __setstate__( self : str , __lowerCamelCase : Dict ):
'''simple docstring'''
lowerCamelCase__ : str = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase__ : int = {}
lowerCamelCase__ : str = load_spm(self.spm_file , self.sp_model_kwargs )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] = None ):
'''simple docstring'''
lowerCamelCase__ : int = Path(UpperCamelCase__ )
assert save_dir.is_dir(), f"{save_directory} should be a directory"
lowerCamelCase__ : Optional[Any] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
lowerCamelCase__ : List[Any] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , UpperCamelCase__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , UpperCamelCase__ )
elif not os.path.isfile(self.spm_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
lowerCamelCase__ : str = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (str(UpperCamelCase__ ), str(UpperCamelCase__ ))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    """simple docstring"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json(path: str ) -> Union[Dict, List]:
    """simple docstring"""
    with open(path, "r" ) as f:
        return json.load(f )
def save_json(data, path: str ) -> None:
    """simple docstring"""
    with open(path, "w" ) as f:
        json.dump(data, f, indent=2 )
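# Usage sketch, assuming the tokenizer class above behaves like
# Speech2TextTokenizer; the checkpoint name comes from the URL map above.
tokenizer = SpeechaTextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
ids = tokenizer("hello world").input_ids
print(ids)                    # token ids, ending with the eos token id
print(tokenizer.decode(ids))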
| 710 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
"""simple docstring"""
def __init__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=13 , __lowerCamelCase : Dict=7 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=99 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : Optional[Any]=36 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Union[str, Any]=6 , __lowerCamelCase : Optional[int]=6 , __lowerCamelCase : Dict=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[Any]=512 , __lowerCamelCase : Dict=16 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : Dict=0.0_2 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Dict=None , ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : List[Any] = batch_size
lowerCamelCase__ : Any = seq_length
lowerCamelCase__ : List[str] = is_training
lowerCamelCase__ : int = use_input_mask
lowerCamelCase__ : List[str] = use_token_type_ids
lowerCamelCase__ : int = use_labels
lowerCamelCase__ : Dict = vocab_size
lowerCamelCase__ : List[Any] = embedding_size
lowerCamelCase__ : Dict = hidden_size
lowerCamelCase__ : Any = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_hidden_groups
lowerCamelCase__ : Optional[int] = num_attention_heads
lowerCamelCase__ : List[str] = intermediate_size
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : str = hidden_dropout_prob
lowerCamelCase__ : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase__ : Optional[int] = max_position_embeddings
lowerCamelCase__ : List[Any] = type_vocab_size
lowerCamelCase__ : Optional[Any] = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : str = num_labels
lowerCamelCase__ : List[Any] = num_choices
lowerCamelCase__ : Any = scope
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[int] = None
if self.use_input_mask:
lowerCamelCase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : Optional[Any] = None
if self.use_token_type_ids:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : int = None
if self.use_labels:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : str = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : int = AlbertModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )
lowerCamelCase__ : Any = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
lowerCamelCase__ : Optional[int] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Any = AlbertForPreTraining(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Union[str, Any] = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , sentence_order_label=__lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Dict = AlbertForMaskedLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : str = AlbertForQuestionAnswering(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : str = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ):
'''simple docstring'''
lowerCamelCase__ : int = self.num_labels
lowerCamelCase__ : Optional[int] = AlbertForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : List[str] = AlbertForTokenClassification(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Tuple = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.num_choices
lowerCamelCase__ : Optional[int] = AlbertForMultipleChoice(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCamelCase__ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ : int = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : int = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : Union[str, Any] = config_and_inputs
lowerCamelCase__ : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
def lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ):
'''simple docstring'''
lowerCamelCase__ : Any = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class in get_values(__lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase )
lowerCamelCase__ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37 )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase__ : Dict = type
self.model_tester.create_and_check_model(*__lowerCamelCase )
@slow
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : List[str] = AlbertModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @slow
    def lowerCAmelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        model = AlbertModel.from_pretrained("albert-base-v2" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4 ) )
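# Quick usage sketch for the model family under test: ALBERT as a masked LM
# through the pipeline API (downloads albert-base-v2 on first use).
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="albert-base-v2")
for candidate in fill_mask("The capital of France is [MASK].", top_k=3):
    print(candidate["token_str"], round(candidate["score"], 3))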
| 5 | 0 |
import re
def complement(dna: str ) -> str:
    """Return the complementary DNA strand (A<->T, C<->G)."""
    if len(re.findall('''[ATCG]''', dna ) ) != len(dna ):
        raise ValueError('''Invalid Strand''' )
    return dna.translate(dna.maketrans('''ATCG''', '''TAGC''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
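# Usage sketch for the helper above:
print(complement("ATCGTA"))  # -> TAGCAT
try:
    complement("ATXG")       # 'X' is not a valid base
except ValueError as error:
    print(error)             # Invalid Strand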
| 87 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str ) -> str | Literal[False]:
    '''simple docstring'''
    lista = list(string1 )
    listb = list(string2 )
    count = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = '''_'''
    if count > 1:
        return False
    else:
        return "".join(lista )
def check(binary: list[str] ) -> list[str]:
    '''simple docstring'''
    pi = []
    while True:
        checka = ['''$'''] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1, len(binary ) ):
                k = compare_string(binary[i], binary[j] )
                if k is not False:
                    checka[i] = '''*'''
                    checka[j] = '''*'''
                    temp.append(k )
        for i in range(len(binary ) ):
            if checka[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )
def decimal_to_binary(no_of_variable: int, minterms: Sequence[int] ) -> list[str]:
    '''simple docstring'''
    temp = []
    for minterm in minterms:
        string = ''''''
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp
def is_for_table(stringa: str, stringb: str, count: int ) -> bool:
    '''simple docstring'''
    lista = list(stringa )
    listb = list(stringb )
    count_n = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str] ) -> list[str]:
    '''simple docstring'''
    temp = []
    select = [0] * len(chart )
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str] ) -> list[list[int]]:
    '''simple docstring'''
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count('''_''' )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i], binary[j], count ):
                chart[i][j] = 1
    return chart
def main() -> None:
    '''simple docstring'''
    no_of_variable = int(input('''Enter the no. of variables\n''' ) )
    minterms = [
        int(x )
        for x in input(
            '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms )
    prime_implicants = check(binary )
    print('''Prime Implicants are:''' )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants, binary )
    essential_prime_implicants = selection(chart, prime_implicants )
    print('''Essential Prime Implicants are:''' )
    print(essential_prime_implicants )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
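# Non-interactive sketch of the pipeline main() wires together, for the
# three-variable function with minterms 1, 5 and 7:
binary = decimal_to_binary(3, [1, 5, 7])            # ['001', '101', '111']
prime_implicants = check(binary)                    # e.g. ['_01', '1_1']
chart = prime_implicant_chart(prime_implicants, binary)
print(selection(chart, prime_implicants))           # both implicants are essential here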
| 344 | 0 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self ):
        self.node_position = []
    def get_position(self, vertex ):
        return self.node_position[vertex]
    def set_position(self, vertex, pos ):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions ):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start] ) )
                self.set_position(positions[start], temp )
                self.top_to_bottom(heap, smallest_child, size, positions )
    def bottom_to_top(self, val, index, heap, position ):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0 )
    def heapify(self, heap, positions ):
        start = len(heap ) // 2 - 1
        for i in range(start, -1, -1 ):
            self.top_to_bottom(heap, i, len(heap ), positions )
    def delete_minimum(self, heap, positions ):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap ), positions )
        return temp
def prisms_algorithm(adjacency_list ):
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions )
    for _ in range(1, len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv, positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor ), distance_tv, positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("""Enter number of edges: """).strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
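# Non-interactive sketch of the function above: a weighted 4-vertex graph as
# adjacency lists of [neighbor, weight] pairs.
sample_graph = defaultdict(list)
for u, v, w in [(0, 1, 1), (1, 2, 2), (2, 3, 1), (3, 0, 4), (0, 2, 3)]:
    sample_graph[u].append([v, w])
    sample_graph[v].append([u, w])
print(prisms_algorithm(sample_graph))  # [(0, 1), (1, 2), (2, 3)]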
| 497 |
"""simple docstring"""
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int ) -> list[str]:
    if partitions <= 0:
        raise ValueError('''partitions must be a positive number!''' )
    if partitions > number_of_bytes:
        raise ValueError('''partitions can not > number_of_bytes!''' )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
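Hedged doctest-style checks for the partitioner above (allocation_num is the restored
name; the expected ranges follow directly from the integer division):

assert allocation_num(16647, 4) == ["1-4161", "4162-8322", "8323-12483", "12484-16647"]
assert allocation_num(50, 5) == ["1-10", "11-20", "21-30", "31-40", "41-50"]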
| 497 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a_ ( lowerCamelCase ):
lowercase = ["""image_processor""", """tokenizer"""]
lowercase = """LayoutLMv3ImageProcessor"""
lowercase = ("""LayoutLMv3Tokenizer""", """LayoutLMv3TokenizerFast""")
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _SCREAMING_SNAKE_CASE , )
UpperCamelCase = kwargs.pop("""feature_extractor""" )
UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
# first, apply the image processor
UpperCamelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCamelCase = features["""words"""]
UpperCamelCase = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# add pixel values
UpperCamelCase = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
UpperCamelCase = self.get_overflowing_images(_SCREAMING_SNAKE_CASE , encoded_inputs["""overflow_to_sample_mapping"""] )
UpperCamelCase = images
return encoded_inputs
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
UpperCamelCase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
F" {len(_SCREAMING_SNAKE_CASE )} and {len(_SCREAMING_SNAKE_CASE )}" )
return images_with_overflow
def A__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def A__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def A__ ( self ) -> List[str]:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def A__ ( self ) -> Tuple:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _SCREAMING_SNAKE_CASE , )
return self.image_processor
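A minimal hedged usage sketch for the processor above. It assumes a transformers version
that ships LayoutLMv3 and a pytesseract install (apply_ocr defaults to True); the path
"document.png" is a placeholder:

from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
image = Image.open("document.png").convert("RGB")
encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
print(list(encoding.keys()))  # expect input_ids, attention_mask, bbox, pixel_values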
| 301 |
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    # Function name is restored for clarity; the original identifier was obfuscated.
    """Return the minimum count of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
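Hedged sanity checks for the sum-of-squares DP above (by Lagrange's four-square theorem
every answer lies between 1 and 4):

assert minimum_squares_to_represent_a_number(12) == 3   # 12 = 4 + 4 + 4
assert minimum_squares_to_represent_a_number(13) == 2   # 13 = 4 + 9
assert minimum_squares_to_represent_a_number(25) == 1   # 25 = 5**2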
| 301 | 1 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
lowercase__ : List[str] = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""")
@require_torch
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : Tuple , lowercase_ : Path , lowercase_ : Union[str, None] = None , lowercase_ : Union[List[str], None] = None , lowercase_ : Union[str, List[str], None] = None , lowercase_ : bool = True , ):
snake_case_ : Any = [file for file in os.listdir(lowercase_ ) if os.path.isfile(os.path.join(lowercase_ , lowercase_ ) )]
if identifier is not None:
snake_case_ : Dict = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_ ):
for n_ in n_identifier:
snake_case_ : Any = [file for file in files if n_ not in file]
else:
snake_case_ : Dict = [file for file in files if n_identifier not in file]
snake_case_ : str = ignore_files or []
ignore_files.append('''__init__.py''' )
snake_case_ : Tuple = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('''Testing''' , lowercase_ )
if only_modules:
snake_case_ : Tuple = file.split('''.''' )[0]
try:
snake_case_ : List[str] = getattr(lowercase_ , lowercase_ )
snake_case_ : List[str] = doctest.DocTestSuite(lowercase_ )
snake_case_ : Tuple = unittest.TextTestRunner().run(lowercase_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f"{module_identifier} is not a module." )
else:
snake_case_ : int = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def _snake_case ( self : Any ):
snake_case_ : Any = Path('''src/transformers''' )
snake_case_ : List[Any] = '''modeling'''
snake_case_ : Dict = [
'''modeling_ctrl.py''',
'''modeling_tf_ctrl.py''',
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_ )
def _snake_case ( self : Union[str, Any] ):
snake_case_ : List[Any] = Path('''src/transformers''' )
snake_case_ : Optional[Any] = '''tokenization'''
self.analyze_directory(lowercase_ , identifier=lowercase_ )
def _snake_case ( self : Union[str, Any] ):
snake_case_ : List[Any] = Path('''src/transformers''' )
snake_case_ : Dict = '''configuration'''
self.analyze_directory(lowercase_ , identifier=lowercase_ )
def _snake_case ( self : Any ):
snake_case_ : Tuple = Path('''src/transformers''' )
snake_case_ : Union[str, Any] = ['''configuration''', '''modeling''', '''tokenization''']
self.analyze_directory(lowercase_ , n_identifier=lowercase_ )
def _snake_case ( self : Optional[int] ):
snake_case_ : Tuple = Path('''docs/source''' )
snake_case_ : int = ['''favicon.ico''']
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_ )
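A standalone hedged sketch of the DocTestSuite pattern used in the only_modules branch
above, run against a toy module; this is an added illustration, not part of the original
test file:

import doctest
import sys
import unittest

def add(a, b):
    """
    >>> add(2, 3)
    5
    """
    return a + b

# Collect this module's doctests into a unittest suite and run them.
suite = doctest.DocTestSuite(sys.modules[__name__])
result = unittest.TextTestRunner().run(suite)
assert len(result.failures) == 0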
| 700 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ : Any = get_tests_dir('''fixtures''')
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : Tuple ):
# A mock response for an HTTP head request to emulate server down
snake_case_ : str = mock.Mock()
snake_case_ : Optional[Any] = 500
snake_case_ : str = {}
snake_case_ : Optional[int] = HTTPError
snake_case_ : Tuple = {}
# Download this model to make sure it's in the cache.
snake_case_ : List[str] = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=lowercase_ ) as mock_head:
snake_case_ : int = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This check we did call the fake head request
mock_head.assert_called()
def _snake_case ( self : Union[str, Any] ):
# This test is for deprecated behavior and can be removed in v5
snake_case_ : str = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class _UpperCAmelCase ( unittest.TestCase):
@classmethod
def _snake_case ( cls : int ):
snake_case_ : Dict = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def _snake_case ( cls : str ):
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def _snake_case ( self : Any ):
snake_case_ : int = WavaVecaFeatureExtractor.from_pretrained(lowercase_ )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
snake_case_ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowercase_ , repo_id='''test-feature-extractor''' , push_to_hub=lowercase_ , use_auth_token=self._token )
snake_case_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
def _snake_case ( self : Dict ):
snake_case_ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(lowercase_ )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
snake_case_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowercase_ , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=lowercase_ , use_auth_token=self._token )
snake_case_ : int = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
def _snake_case ( self : str ):
CustomFeatureExtractor.register_for_auto_class()
snake_case_ : Optional[Any] = CustomFeatureExtractor.from_pretrained(lowercase_ )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
snake_case_ : Any = AutoFeatureExtractor.from_pretrained(
f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=lowercase_ )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 485 | 0 |
def solution(n: int = 100) -> int:
    """Project Euler 29: count distinct terms of a**b for 2 <= a, b <= n."""
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
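A hedged check for the distinct-powers counter above: for n = 5 the only collision among
a**b with 2 <= a, b <= 5 is 16 = 2**4 = 4**2, so

assert solution(5) == 4 * 4 - 1  # 15 distinct terms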
| 204 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
_SCREAMING_SNAKE_CASE : List[Any] = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_SCREAMING_SNAKE_CASE : List[Any] = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_SCREAMING_SNAKE_CASE : List[Any] = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_SCREAMING_SNAKE_CASE : List[Any] = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
    def _download_and_prepare(self, dl_manager):
import nltk
nltk.download("wordnet")
if NLTK_VERSION >= version.Version("3.6.5"):
nltk.download("punkt")
if NLTK_VERSION >= version.Version("3.6.6"):
nltk.download("omw-1.4")
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 226 | 0 |
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_a : Dict = logging.get_logger(__name__)
@add_end_docstrings(_snake_case)
class _UpperCAmelCase ( _snake_case):
def __init__( self , *snake_case_ , **snake_case_ ):
super().__init__(*snake_case_ , **snake_case_ )
requires_backends(self , "decord" )
self.check_model_type(snake_case_ )
def lowerCamelCase__ ( self , snake_case_=None , snake_case_=None , snake_case_=None ):
_snake_case : Optional[Any] = {}
if frame_sampling_rate is not None:
_snake_case : Dict = frame_sampling_rate
if num_frames is not None:
_snake_case : List[str] = num_frames
_snake_case : Dict = {}
if top_k is not None:
_snake_case : Any = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , snake_case_ , **snake_case_ ):
return super().__call__(snake_case_ , **snake_case_ )
def lowerCamelCase__ ( self , snake_case_ , snake_case_=None , snake_case_=1 ):
if num_frames is None:
_snake_case : Optional[int] = self.model.config.num_frames
if video.startswith("http://" ) or video.startswith("https://" ):
_snake_case : str = BytesIO(requests.get(snake_case_ ).content )
_snake_case : Optional[int] = VideoReader(snake_case_ )
videoreader.seek(0 )
_snake_case : List[Any] = 0
_snake_case : Optional[int] = num_frames * frame_sampling_rate - 1
        _snake_case : Union[str, Any] = np.linspace(snake_case_ , snake_case_ , num=snake_case_ , dtype=np.int64 )
_snake_case : Optional[int] = videoreader.get_batch(snake_case_ ).asnumpy()
_snake_case : Union[str, Any] = list(snake_case_ )
_snake_case : str = self.image_processor(snake_case_ , return_tensors=self.framework )
return model_inputs
def lowerCamelCase__ ( self , snake_case_ ):
_snake_case : List[Any] = self.model(**snake_case_ )
return model_outputs
def lowerCamelCase__ ( self , snake_case_ , snake_case_=5 ):
if top_k > self.model.config.num_labels:
_snake_case : Tuple = self.model.config.num_labels
if self.framework == "pt":
_snake_case : List[str] = model_outputs.logits.softmax(-1 )[0]
_snake_case , _snake_case : Optional[Any] = probs.topk(snake_case_ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
_snake_case : List[Any] = scores.tolist()
_snake_case : Any = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(snake_case_ , snake_case_ )]
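A standalone hedged sketch of the frame-index selection performed in preprocess() above
(the mangled np.intaa in the source is np.int64, fixed in place; the sample values are
just an illustration):

import numpy as np

num_frames, frame_sampling_rate = 8, 4
end_idx = num_frames * frame_sampling_rate - 1
indices = np.linspace(0, end_idx, num=num_frames, dtype=np.int64)
print(indices)  # [ 0  4  8 13 17 22 26 31] -- evenly spaced over the sampled window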
| 87 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_a : Optional[int] = logging.get_logger(__name__)
_a : List[str] = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
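A minimal hedged usage sketch for the restored config class above (assumes a transformers
version that ships ConvNeXt V2):

from transformers import ConvNextV2Config

config = ConvNextV2Config()     # hidden_sizes default to [96, 192, 384, 768]
print(config.depths)            # [3, 3, 9, 3]
print(config.stage_names)       # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']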
| 87 | 1 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path", ["accuracy"] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits", [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
], )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception", [
("paws", None, ValueError),
], )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected", [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
], )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config", [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
], )
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits", [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
], )
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception", [
("paws", None, ValueError),
], )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 249 |
"""simple docstring"""
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def SCREAMING_SNAKE_CASE ( snake_case):
if isinstance(snake_case, collections.abc.Iterable):
return x
return (x, x)
@require_tf
class _A :
"""simple docstring"""
def lowercase ( self : List[Any] , A_ : Optional[Any] , A_ : int ) -> Any:
pass
def lowercase ( self : List[Any] ) -> Union[str, Any]:
pass
def lowercase ( self : Any ) -> Union[str, Any]:
pass
def lowercase ( self : List[str] , A_ : int , A_ : Tuple , A_ : List[Any] , A_ : Optional[int] , A_ : Tuple=None , **A_ : str ) -> Tuple:
__snake_case = VisionTextDualEncoderConfig.from_vision_text_configs(A_ , A_ )
__snake_case = TFVisionTextDualEncoderModel(A_ )
__snake_case = model(input_ids=A_ , pixel_values=A_ , attention_mask=A_ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def lowercase ( self : List[str] , A_ : Dict , A_ : Union[str, Any] , A_ : int , A_ : int , A_ : Union[str, Any]=None , **A_ : Union[str, Any] ) -> List[str]:
__snake_case , __snake_case = self.get_vision_text_model(A_ , A_ )
__snake_case = TFVisionTextDualEncoderModel(vision_model=A_ , text_model=A_ )
__snake_case = model(input_ids=A_ , pixel_values=A_ , attention_mask=A_ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowercase ( self : Tuple , A_ : Any , A_ : Dict , A_ : Any , A_ : Optional[Any] , A_ : Optional[int]=None , **A_ : str ) -> Optional[Any]:
__snake_case , __snake_case = self.get_vision_text_model(A_ , A_ )
__snake_case = {'''vision_model''': vision_model, '''text_model''': text_model}
__snake_case = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**A_ )
__snake_case = model(input_ids=A_ , pixel_values=A_ , attention_mask=A_ )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowercase ( self : str , A_ : str , A_ : Optional[Any] , A_ : Any , A_ : Optional[int] , A_ : Tuple=None , **A_ : int ) -> int:
__snake_case , __snake_case = self.get_vision_text_model(A_ , A_ )
__snake_case = TFVisionTextDualEncoderModel(vision_model=A_ , text_model=A_ )
__snake_case = model(input_ids=A_ , pixel_values=A_ , attention_mask=A_ )
__snake_case = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A_ )
__snake_case = TFVisionTextDualEncoderModel.from_pretrained(A_ )
__snake_case = model(input_ids=A_ , pixel_values=A_ , attention_mask=A_ )
__snake_case = after_output[0].numpy()
__snake_case = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A_ , 1E-5 )
def lowercase ( self : List[str] , A_ : str , A_ : Dict , A_ : List[str] , A_ : str , A_ : int=None , **A_ : Union[str, Any] ) -> List[str]:
__snake_case , __snake_case = self.get_vision_text_model(A_ , A_ )
__snake_case = TFVisionTextDualEncoderModel(vision_model=A_ , text_model=A_ )
__snake_case = model(
input_ids=A_ , pixel_values=A_ , attention_mask=A_ , output_attentions=A_ )
__snake_case = output.vision_model_output.attentions
self.assertEqual(len(A_ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__snake_case = to_atuple(vision_model.config.image_size )
__snake_case = to_atuple(vision_model.config.patch_size )
__snake_case = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__snake_case = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__snake_case = output.text_model_output.attentions
self.assertEqual(len(A_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowercase ( self : Dict , A_ : np.ndarray , A_ : np.ndarray , A_ : float ) -> Union[str, Any]:
__snake_case = np.abs((a - b) ).max()
self.assertLessEqual(A_ , A_ , f"Difference between torch and flax is {diff} (>= {tol})." )
def lowercase ( self : List[str] ) -> Optional[int]:
__snake_case = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**A_ )
def lowercase ( self : Optional[int] ) -> int:
__snake_case = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**A_ )
def lowercase ( self : List[str] ) -> Union[str, Any]:
__snake_case = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**A_ )
def lowercase ( self : List[str] ) -> int:
__snake_case = self.prepare_config_and_inputs()
self.check_save_load(**A_ )
def lowercase ( self : Optional[int] ) -> List[str]:
__snake_case = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**A_ )
@slow
def lowercase ( self : Any ) -> Any:
__snake_case , __snake_case = self.get_pretrained_model_and_inputs()
__snake_case = model_a(**A_ )
__snake_case = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(A_ )
__snake_case = TFVisionTextDualEncoderModel.from_pretrained(A_ )
__snake_case = model_a(**A_ )
__snake_case = after_outputs[0].numpy()
__snake_case = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A_ , 1E-5 )
@require_tf
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all
        # so let's just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
def lowercase ( self : Dict , A_ : Union[str, Any] , A_ : Tuple , A_ : Union[str, Any] , A_ : str , A_ : List[Any]=None , **A_ : List[Any] ) -> int:
__snake_case , __snake_case = self.get_vision_text_model(A_ , A_ )
__snake_case = TFVisionTextDualEncoderModel(vision_model=A_ , text_model=A_ )
__snake_case = model(
input_ids=A_ , pixel_values=A_ , attention_mask=A_ , output_attentions=A_ )
__snake_case = output.vision_model_output.attentions
self.assertEqual(len(A_ ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__snake_case = to_atuple(vision_model.config.image_size )
__snake_case = to_atuple(vision_model.config.patch_size )
__snake_case = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__snake_case = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__snake_case = output.text_model_output.attentions
self.assertEqual(len(A_ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class _A ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : Optional[Any] ) -> Optional[int]:
__snake_case = TFVisionTextDualEncoderModel.from_pretrained(
'''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=A_ )
__snake_case = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__snake_case = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=A_ , padding=A_ , return_tensors='''np''' )
__snake_case = model(**A_ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__snake_case = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , A_ , atol=1E-3 ) )
| 564 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
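A standalone hedged sketch of the same deferred-import idea using PEP 562 module-level
__getattr__; this is a simplified stand-in, not transformers' actual _LazyModule:

import importlib

_LAZY_ATTRS = {"XLNetModel": ".modeling_xlnet", "XLNetTokenizer": ".tokenization_xlnet"}

def __getattr__(name):
    # Import the submodule only on first attribute access.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")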
| 702 |
"""simple docstring"""
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list in place by exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
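Hedged doctest-style checks for exchange_sort above:

assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert exchange_sort([-1, 0, 5, 2]) == [-1, 0, 2, 5]
assert exchange_sort([]) == []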
| 20 | 0 |
'''simple docstring'''
def heaps(arr: list) -> list:
    """Generate all permutations of `arr` with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
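A hedged check for Heap's algorithm above: three elements yield all 3! = 6 permutations.

perms = heaps([1, 2, 3])
assert len(perms) == 6
assert set(perms) == {(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)}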
| 143 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A_ = "▁"
A_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class _snake_case ( _a , unittest.TestCase ):
_A : List[str] = BertGenerationTokenizer
_A : Any = False
_A : int = True
def __UpperCamelCase ( self : List[str] ):
super().setUp()
SCREAMING_SNAKE_CASE:Union[str, Any] = BertGenerationTokenizer(SCREAMING_SNAKE_CASE__ ,keep_accents=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : int ):
SCREAMING_SNAKE_CASE:List[str] = "<s>"
SCREAMING_SNAKE_CASE:int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) ,SCREAMING_SNAKE_CASE__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) ,SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE:Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<unk>" )
self.assertEqual(vocab_keys[1] ,"<s>" )
self.assertEqual(vocab_keys[-1] ,"<pad>" )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) ,1_002 )
def __UpperCamelCase ( self : Union[str, Any] ):
self.assertEqual(self.get_tokenizer().vocab_size ,1_000 )
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE:int = BertGenerationTokenizer(SCREAMING_SNAKE_CASE__ ,keep_accents=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) ,[285, 46, 10, 170, 382] ,)
SCREAMING_SNAKE_CASE:List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] ,)
SCREAMING_SNAKE_CASE:Union[str, Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ ,[8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ,)
SCREAMING_SNAKE_CASE:List[Any] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] ,)
@cached_property
def __UpperCamelCase ( self : List[Any] ):
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def __UpperCamelCase ( self : Dict ):
SCREAMING_SNAKE_CASE:Optional[int] = "Hello World!"
SCREAMING_SNAKE_CASE:Optional[int] = [18_536, 2_260, 101]
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@slow
def __UpperCamelCase ( self : Any ):
SCREAMING_SNAKE_CASE:int = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
SCREAMING_SNAKE_CASE:Any = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@require_torch
@slow
def __UpperCamelCase ( self : Dict ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
SCREAMING_SNAKE_CASE:Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
SCREAMING_SNAKE_CASE:List[str] = " ".join(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Tuple = self.big_tokenizer.encode_plus(SCREAMING_SNAKE_CASE__ ,return_tensors="pt" ,return_token_type_ids=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Any = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] ,return_tensors="pt" ,return_token_type_ids=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = BertGenerationConfig()
SCREAMING_SNAKE_CASE:List[Any] = BertGenerationEncoder(SCREAMING_SNAKE_CASE__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**SCREAMING_SNAKE_CASE__ )
model(**SCREAMING_SNAKE_CASE__ )
@slow
def __UpperCamelCase ( self : List[Any] ):
# fmt: off
SCREAMING_SNAKE_CASE:Any = {"input_ids": [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ ,model_name="google/bert_for_seq_generation_L-24_bbc_encoder" ,revision="c817d1fd1be2ffa69431227a1fe320544943d4db" ,)
| 143 | 1 |
def decimal_to_binary(num: int) -> str:
    # Function name is restored for clarity; the original identifier was obfuscated.
    """Convert an integer to a binary string with a 0b prefix."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
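Hedged checks for the converter above (decimal_to_binary is the restored name):

assert decimal_to_binary(0) == "0b0"
assert decimal_to_binary(2) == "0b10"
assert decimal_to_binary(7) == "0b111"
assert decimal_to_binary(-5) == "-0b101"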
| 431 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
    # Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['''model_config'''] )

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path )

    tokenizer = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('''<ent>''' , lstrip=False , rstrip=False )
    entity_token_2 = AddedToken('''<ent2>''' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_1, entity_token_2]} )
    config.vocab_size += 2

    print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
    tokenizer.save_pretrained(pytorch_dump_folder_path )

    with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
        json.dump(entity_vocab , f )

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )

    # Initialize the embeddings of the special tokens
    word_emb = state_dict['''embeddings.word_embeddings.weight''']
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
    state_dict['''embeddings.word_embeddings.weight'''] = torch.cat([word_emb, ent_emb, ent2_emb] )

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f'encoder.layer.{layer_index}.attention.self.'
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['''entity_embeddings.entity_embeddings.weight''']
    entity_emb[entity_vocab['''[MASK2]''']] = entity_emb[entity_vocab['''[MASK]''']]

    model = LukeModel(config=config ).eval()

    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f'Missing keys {", ".join(missing_keys )}. Expected only missing embeddings.position_ids' )
    if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
        raise ValueError(
            '''Unexpected keys'''
            f' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='''entity_classification''' )

    text = (
        '''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
        ''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
    )
    span = (39, 42)
    encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors='''pt''' )

    outputs = model(**encoding )

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024) )
        expected_slice = torch.tensor(
            [[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] )
    else:  # base
        expected_shape = torch.Size((1, 42, 768) )
        expected_slice = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024) )
        expected_slice = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] )
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] )

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            f' {expected_shape}' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print('''Saving PyTorch model to {}'''.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )


def load_entity_vocab(entity_vocab_path ):
    entity_vocab = {}
    with open(entity_vocab_path , '''r''' , encoding='''utf-8''' ) as f:
        for index, line in enumerate(f ):
            title , _ = line.rstrip().split('''\t''' )
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
_lowercase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
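# Example invocation (hypothetical paths; the script file name is an assumption):
# python convert_luke_checkpoint.py --checkpoint_path ./luke.bin --metadata_path ./metadata.json \
#     --entity_vocab_path ./entity_vocab.tsv --pytorch_dump_folder_path ./luke-hf --model_size base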
| 431 | 1 |
"""simple docstring"""
from __future__ import annotations
def __UpperCamelCase ( stress : float , tangential_force : float , area : float , ) -> tuple[str, float]:
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
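    # Illustrative check (assumed values): with stress left at 0, the function
    # solves for it as tangential_force / area.
    print(__UpperCamelCase(0, 25, 100))  # -> ('stress', 0.25)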
| 163 |
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path , pytorch_dump_folder_path ):
    """simple docstring"""
    chkpt = torch.load(xlm_checkpoint_path , map_location="cpu" )

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@" ) == -1 and i > 13 else s.replace("@@" , "" ): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )

    print(F'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(config , indent=2 ) + "\n" )

    print(F'''Save vocab file to {pytorch_vocab_dump_path}''' )
    with open(pytorch_vocab_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(vocab , indent=2 ) + "\n" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
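# Example invocation (hypothetical paths, shown for illustration only):
# python convert_xlm_checkpoint_to_pytorch.py \
#     --xlm_checkpoint_path ./mlm_en_2048.pth --pytorch_dump_folder_path ./xlm-hf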
| 163 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
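# Minimal sketch of the lazy-import behavior set up above (hypothetical session):
# >>> from transformers.models import resnet
# >>> resnet.ResNetConfig   # only configuration_resnet is imported at this point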
| 634 |
def hamming_distance(string1 : str , string2 : str ) -> int:
    if len(string1 ) != len(string2 ):
        raise ValueError("""String lengths must match!""" )
    count = 0

    for char1, char2 in zip(string1 , string2 ):
        if char1 != char2:
            count += 1

    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
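    # Illustrative check (assumed strings): counts pairwise differing positions.
    print(hamming_distance("karolin", "kathrin"))  # -> 3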
| 634 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
 | 45 |
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowercase_ = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self ):
        self.initialized = False

    def create_rag_retriever(self , config , question_encoder_tokenizer , generator_tokenizer , index ):
        if not self.initialized:
            self.retriever = RagRetriever(
                config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
            self.initialized = True

    def init_retrieval(self ):
        self.retriever.index.init_index()

    def retrieve(self , question_hidden_states , n_docs ):
        doc_ids , retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states , n_docs )
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever ):
    def __init__(self , config , question_encoder_tokenizer , generator_tokenizer , retrieval_workers , index=None ):
        if index is not None and index.is_initialized() and len(retrieval_workers ) > 0:
            raise ValueError(
                """When using Ray for distributed fine-tuning, """
                """you'll need to provide the paths instead, """
                """as the dataset and the index are loaded """
                """separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers ) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config , question_encoder_tokenizer , generator_tokenizer , index )
                    for worker in self.retrieval_workers
                ] )

    def init_retrieval(self ):
        logger.info("""initializing retrieval""" )
        if len(self.retrieval_workers ) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self , question_hidden_states , n_docs ):
        if len(self.retrieval_workers ) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
            doc_ids , retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states , n_docs ) )
        else:
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )

    @classmethod
    def get_tokenizers(cls , retriever_name_or_path , indexed_dataset=None , **kwargs ):
        return super(RagRayDistributedRetriever , cls ).get_tokenizers(retriever_name_or_path , indexed_dataset , **kwargs )

    @classmethod
    def from_pretrained(cls , retriever_name_or_path , actor_handles , indexed_dataset=None , **kwargs ):
        config = kwargs.pop("""config""" , None ) or RagConfig.from_pretrained(retriever_name_or_path , **kwargs )
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path , config=config )
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = """custom"""
            index = CustomHFIndex(config.retrieval_vector_size , indexed_dataset )
        else:
            index = cls._build_index(config )
        return cls(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , retrieval_workers=actor_handles , index=index , )
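# Hypothetical wiring sketch (actor count and model name are assumptions):
# workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
# retriever = RagRayDistributedRetriever.from_pretrained("facebook/rag-token-nq", workers)
# retriever.init_retrieval()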
| 470 | 0 |
def reverse_long_words(sentence: str ) ->str:
    return " ".join(
        "".join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
| 592 |
from importlib import import_module
from .logging import get_logger
snake_case__ : Dict = get_logger(__name__)
class _PatchedModuleObj:
    """Set the attributes of a module on this object, keeping a reference to the original module."""

    def __init__(self , module , attrs=None ):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__" ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self , obj , target: str , new , attrs=None ):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split("." )[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self ):
        *submodules , target_attr = self.target.split("." )

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module(".".join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
                    patched = getattr(self.obj , attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
                        patched = getattr(patched , key )
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new )

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules ) ) , target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr ) is attr_value:
                    self.original[attr] = getattr(self.obj , attr )
                    setattr(self.obj , attr , self.new )
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj , target_attr , self.new )
        else:
            raise RuntimeError(F"Tried to patch attribute {target_attr} instead of a submodule." )

    def __exit__(self , *exc_info ):
        for attr in list(self.original ):
            setattr(self.obj , attr , self.original.pop(attr ) )

    def start(self ):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self )

    def stop(self ):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self )
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
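# Illustrative usage sketch (hypothetical module `m` that did `from os.path import join`):
# with patch_submodule(m, "os.path.join", lambda *p: "/".join(p)):
#     m.join("a", "b")  # -> "a/b", the patched callable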
| 592 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
snake_case__ = pytest.mark.integration
@require_faiss
class IndexableDatasetTest ( TestCase ):
    def _create_dummy_dataset(self ) -> Dataset:
        dset = Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
    def test_add_faiss_index(self ):
        import faiss

        dset : Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.float32 )} , with_indices=True , keep_in_memory=True )
        dset = dset.add_faiss_index('''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores , examples = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.float32 ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
dset.drop_index('''vecs''' )
    def test_add_faiss_index_from_external_arrays(self ):
        import faiss

        dset : Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores , examples = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.float32 ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
    def test_serialization(self ):
        import faiss

        dset : Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            dset.save_faiss_index('''vecs''' , tmp_file.name )
            dset.load_faiss_index('''vecs2''' , tmp_file.name )
        os.unlink(tmp_file.name )
        scores , examples = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.float32 ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
    def test_drop_index(self ):
        dset : Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' )
        dset.drop_index('''vecs''' )
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.float32 ) ) )
    def test_add_elasticsearch_index(self ):
        from elasticsearch import Elasticsearch

        dset : Dataset = self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
            mocked_index_create.return_value = {'''acknowledged''': True}
            mocked_bulk.return_value([(True, None)] * 30 )
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('''filename''' , es_client=es_client )
            scores , examples = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
@require_faiss
class FaissIndexTest ( TestCase ):
    def test_flat_ip(self ):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.float32 ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )
        # single query
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores , indices = index.search(query )
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        queries = np.eye(5 , dtype=np.float32 )[::-1]
        total_scores , total_indices = index.search_batch(queries )
        self.assertRaises(ValueError , index.search_batch , queries[0] )
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
    def test_factory(self ):
        import faiss

        index = FaissIndex(string_factory='''Flat''' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        index = FaissIndex(string_factory='''LSH''' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(ValueError ):
            _ = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
    def test_custom(self ):
        import faiss

        custom_index = faiss.IndexFlat(5 )
        index = FaissIndex(custom_index=custom_index )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
    def test_serialization(self ):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )

        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores , indices = index.search(query )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs ):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )

    index_name = '''index.faiss'''
    path = f'''mock://{index_name}'''
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )

    query = np.zeros(5 , dtype=np.float32 )
    query[1] = 1
    scores , indices = index.search(query )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest ( TestCase ):
    def test_elasticsearch(self ):
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'''acknowledged''': True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value([(True, None)] * 3 )
            index.add_documents(['''foo''', '''bar''', '''foobar'''] )

            # single query
            query = '''foo'''
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
            scores , indices = index.search(query )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )

            # single query with timeout
            query = '''foo'''
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
            scores , indices = index.search(query , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )

            # batched queries
            queries = ['''foo''', '''bar''', '''foobar''']
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
            total_scores , total_indices = index.search_batch(queries )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )

            # batched queries with timeout
            queries = ['''foo''', '''bar''', '''foobar''']
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
            total_scores , total_indices = index.search_batch(queries , request_timeout=30 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
| 583 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
snake_case__ = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None ):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint['''cfg'''] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint['''model'''] )
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config )

    recursively_load_weights(model , hf_wavlm )

    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
snake_case__ = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
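# Example invocation (hypothetical paths, shown for illustration only):
# python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path ./WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base-hf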
| 583 | 1 |
import argparse
from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path , config_name , flax_dump_folder_path ):
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config )
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
    split_mlp_wi = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
__magic_name__ : Union[str, Any] ='SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
__magic_name__ : Dict ='LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ : Dict ='TransientGlobalSelfAttention'
else:
raise ValueError(
"""Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`"""
""" attribute with a value from [\'local\', \'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
        layer_name = F"layers_{str(layer_index )}"
# Self-Attention
__magic_name__ : Any =tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
__magic_name__ : Optional[int] =tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
__magic_name__ : Optional[int] =tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
__magic_name__ : Optional[Any] =tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ : Optional[int] =tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
__magic_name__ : int =tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
__magic_name__ : int =tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
__magic_name__ : Dict =tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
__magic_name__ : Union[str, Any] =tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
__magic_name__ : List[str] =tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
__magic_name__ : List[Any] =tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
        flax_model_encoder_layer_block = flax_model.params['encoder']['block'][str(layer_index )]['layer']
__magic_name__ : Tuple =tax_attention_key
__magic_name__ : Optional[Any] =tax_attention_out
__magic_name__ : Tuple =tax_attention_query
__magic_name__ : Union[str, Any] =tax_attention_value
__magic_name__ : str =tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ : Union[str, Any] =tax_global_layer_norm
if split_mlp_wi:
__magic_name__ : int =tax_mlp_wi_a
__magic_name__ : int =tax_mlp_wi_a
else:
__magic_name__ : str =tax_mlp_wi
__magic_name__ : Tuple =tax_mlp_wo
__magic_name__ : Union[str, Any] =tax_mlp_layer_norm
__magic_name__ : Any =flax_model_encoder_layer_block
# Only for layer 0:
__magic_name__ : str =tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
__magic_name__ : Tuple =tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
__magic_name__ : Union[str, Any] =tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
__magic_name__ : Optional[int] =tax_encoder_global_rel_embedding
# Assigning
__magic_name__ : Dict =tax_model['target']['encoder']['encoder_norm']['scale']
__magic_name__ : str =tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = F"layers_{str(layer_index )}"
# Self-Attention
__magic_name__ : int =tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
__magic_name__ : Optional[int] =tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
__magic_name__ : Any =tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
__magic_name__ : Tuple =tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
__magic_name__ : Optional[Any] =tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
__magic_name__ : Optional[Any] =tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
__magic_name__ : List[Any] =tax_enc_dec_attention_module['key']['kernel']
__magic_name__ : str =tax_enc_dec_attention_module['out']['kernel']
__magic_name__ : List[Any] =tax_enc_dec_attention_module['query']['kernel']
__magic_name__ : Union[str, Any] =tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
__magic_name__ : List[Any] =tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
__magic_name__ : Union[str, Any] =tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
__magic_name__ : int =tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
__magic_name__ : List[Any] =tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
__magic_name__ : str =tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
__magic_name__ : Any =tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
        flax_model_decoder_layer_block = flax_model.params['decoder']['block'][str(layer_index )]['layer']
__magic_name__ : List[Any] =tax_attention_key
__magic_name__ : int =tax_attention_out
__magic_name__ : Tuple =tax_attention_query
__magic_name__ : Dict =tax_attention_value
__magic_name__ : int =tax_pre_attention_layer_norm
__magic_name__ : Optional[int] =tax_enc_dec_attention_key
__magic_name__ : List[Any] =tax_enc_dec_attention_out
__magic_name__ : int =tax_enc_dec_attention_query
__magic_name__ : Dict =tax_enc_dec_attention_value
__magic_name__ : Tuple =tax_cross_layer_norm
if split_mlp_wi:
__magic_name__ : int =tax_mlp_wi_a
__magic_name__ : Dict =tax_mlp_wi_a
else:
__magic_name__ : int =tax_mlp_wi
__magic_name__ : Any =tax_mlp_wo
__magic_name__ : Any =txa_mlp_layer_norm
__magic_name__ : Tuple =flax_model_decoder_layer_block
# Decoder Normalization
__magic_name__ : Optional[int] =tax_model['target']['decoder']['decoder_norm']['scale']
__magic_name__ : Optional[int] =txa_decoder_norm
# Only for layer 0:
__magic_name__ : Any =tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
__magic_name__ : List[Any] =tax_decoder_rel_embedding
# Token Embeddings
__magic_name__ : Dict =tax_model['target']['token_embedder']['embedding']
__magic_name__ : int =txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
__magic_name__ : Union[str, Any] =tax_model['target']['decoder']['logits_dense']['kernel']
    flax_model.save_pretrained(flax_dump_folder_path )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
UpperCAmelCase_ : str = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
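# Example invocation (hypothetical paths and config name, shown for illustration only):
# python convert_t5x_checkpoint_to_flax.py \
#     --t5x_checkpoint_path ./t5x_checkpoint --config_name t5-small --flax_dump_folder_path ./t5-flax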
| 706 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode , use_xla ):
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )

        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size , sequence_length , vocab_size ):
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class TensorFlowBenchmark(Benchmark ):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self ) -> str:
        return tf.__version__
def A__ ( self :str , __snake_case :str , __snake_case :int , __snake_case :int ):
'''simple docstring'''
__magic_name__ : Dict =self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
__magic_name__ : Union[str, Any] =self._prepare_inference_func(__snake_case , __snake_case , __snake_case )
return self._measure_speed(_inference )
def A__ ( self :int , __snake_case :str , __snake_case :int , __snake_case :int ):
'''simple docstring'''
__magic_name__ : Tuple =self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
__magic_name__ : Any =self._prepare_train_func(__snake_case , __snake_case , __snake_case )
return self._measure_speed(_train )
def A__ ( self :str , __snake_case :str , __snake_case :int , __snake_case :int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __snake_case )
__magic_name__ : int =self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
__magic_name__ : Tuple =self._prepare_inference_func(__snake_case , __snake_case , __snake_case )
return self._measure_memory(_inference )
def A__ ( self :str , __snake_case :str , __snake_case :int , __snake_case :int ):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __snake_case )
__magic_name__ : Tuple =self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
__magic_name__ : Any =self._prepare_train_func(__snake_case , __snake_case , __snake_case )
return self._measure_memory(_train )
def A__ ( self :int , __snake_case :str , __snake_case :int , __snake_case :int ):
'''simple docstring'''
__magic_name__ : int =self.config_dict[model_name]
        if self.args.fp16:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
__magic_name__ : Any =(
hasattr(__snake_case , """architectures""" )
and isinstance(config.architectures , __snake_case )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__magic_name__ : Optional[int] ="""TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
__magic_name__ : Optional[int] =__import__("""transformers""" , fromlist=[model_class] )
__magic_name__ : Optional[Any] =getattr(__snake_case , __snake_case )
__magic_name__ : Optional[Any] =model_cls(__snake_case )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
__magic_name__ : Optional[int] =TF_MODEL_MAPPING[config.__class__](__snake_case )
# encoder-decoder has vocab size saved differently
__magic_name__ : List[str] =config.vocab_size if hasattr(__snake_case , """vocab_size""" ) else config.encoder.vocab_size
__magic_name__ : Any =random_input_ids(__snake_case , __snake_case , __snake_case )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__snake_case , decoder_input_ids=__snake_case , training=__snake_case )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__snake_case , training=__snake_case )
__magic_name__ : Tuple =encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def A__ ( self :int , __snake_case :str , __snake_case :int , __snake_case :int ):
'''simple docstring'''
__magic_name__ : Tuple =self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
        if self.args.fp16:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
__magic_name__ : int =(
hasattr(__snake_case , """architectures""" )
and isinstance(config.architectures , __snake_case )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__magic_name__ : List[Any] ="""TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
__magic_name__ : Optional[int] =__import__("""transformers""" , fromlist=[model_class] )
__magic_name__ : str =getattr(__snake_case , __snake_case )
__magic_name__ : List[Any] =model_cls(__snake_case )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
__magic_name__ : int =TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__snake_case )
# encoder-decoder has vocab size saved differently
__magic_name__ : int =config.vocab_size if hasattr(__snake_case , """vocab_size""" ) else config.encoder.vocab_size
__magic_name__ : Optional[Any] =random_input_ids(__snake_case , __snake_case , __snake_case )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
__magic_name__ : List[str] =model(__snake_case , decoder_input_ids=__snake_case , labels=__snake_case , training=__snake_case )[0]
__magic_name__ : int =tf.gradients(__snake_case , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
__magic_name__ : str =model(__snake_case , labels=__snake_case , training=__snake_case )[0]
__magic_name__ : int =tf.gradients(__snake_case , model.trainable_variables )
return gradients
__magic_name__ : Union[str, Any] =encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def A__ ( self :Any , __snake_case :Union[str, Any] ):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(__snake_case , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
__magic_name__ : Union[str, Any] =timeit.repeat(
__snake_case , repeat=self.args.repeat , number=10 , )
return min(__snake_case ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
def A__ ( self :Any , __snake_case :Callable[[], None] ):
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
__magic_name__ : Union[str, Any] =start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
__magic_name__ : str ="""N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
__magic_name__ : List[str] =nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
__magic_name__ : Tuple =nvml.nvmlDeviceGetMemoryInfo(__snake_case )
__magic_name__ : Any =meminfo.used
__magic_name__ : str =Memory(__snake_case )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
__magic_name__ : List[str] =None
else:
__magic_name__ : List[Any] =measure_peak_memory_cpu(__snake_case )
__magic_name__ : str =Memory(__snake_case ) if isinstance(__snake_case , __snake_case ) else memory_bytes
if self.args.trace_memory_line_by_line:
__magic_name__ : List[Any] =stop_memory_tracing(__snake_case )
if memory is None:
__magic_name__ : Any =summary.total
else:
__magic_name__ : Optional[Any] =None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
return "N/A", None
| 367 | 0 |
'''simple docstring'''
def bubble_sort( list_data : list , length : int = 0 ):
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i] , list_data[i + 1] = list_data[i + 1] , list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
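    # Illustrative check (assumed input): recursion stops once a pass makes no swaps.
    print(bubble_sort([0, 5, 2, 3, 2]))  # -> [0, 2, 2, 3, 5]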
| 688 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class lowerCAmelCase__ ( PretrainedConfig ):
"""simple docstring"""
__UpperCamelCase = "swin2sr"
__UpperCamelCase = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__(
        self ,
        image_size=6_4 ,
        patch_size=1 ,
        num_channels=3 ,
        embed_dim=1_8_0 ,
        depths=[6, 6, 6, 6, 6, 6] ,
        num_heads=[6, 6, 6, 6, 6, 6] ,
        window_size=8 ,
        mlp_ratio=2.0 ,
        qkv_bias=True ,
        hidden_dropout_prob=0.0 ,
        attention_probs_dropout_prob=0.0 ,
        drop_path_rate=0.1 ,
        hidden_act="gelu" ,
        use_absolute_embeddings=False ,
        initializer_range=0.02 ,
        layer_norm_eps=1E-5 ,
        upscale=2 ,
        img_range=1.0 ,
        resi_connection="1conv" ,
        upsampler="pixelshuffle" ,
        **kwargs ,
    ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
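# Minimal sketch (assumed defaults): derived fields follow from the arguments.
# config = lowerCAmelCase__(embed_dim=180, depths=[6] * 6)   # class name kept from this snippet
# assert config.num_layers == 6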
| 688 | 1 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 700 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__a: Any = logging.get_logger(__name__)
# General docstring
__a: List[str] = '''RegNetConfig'''
# Base docstring
__a: int = '''facebook/regnet-y-040'''
__a: Union[str, Any] = [1, 1088, 7, 7]
# Image classification docstring
__a: int = '''facebook/regnet-y-040'''
__a: Any = '''tabby, tabby cat'''
__a: List[str] = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCamelCase : int , lowerCamelCase : int = 3 , lowerCamelCase : int = 1 , lowerCamelCase : int = 1 , lowerCamelCase : Optional[str] = "relu" , **lowerCamelCase : Union[str, Any] , ) -> int:
"""simple docstring"""
super().__init__(**lowerCamelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_UpperCAmelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_UpperCAmelCase = tf.keras.layers.ConvaD(
filters=lowerCamelCase , kernel_size=lowerCamelCase , strides=lowerCamelCase , padding="""VALID""" , groups=lowerCamelCase , use_bias=lowerCamelCase , name="""convolution""" , )
_UpperCAmelCase = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
_UpperCAmelCase = ACTaFN[activation] if activation is not None else tf.identity
def lowerCamelCase ( self : Any , lowerCamelCase : Any ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.convolution(self.padding(lowerCamelCase ) )
_UpperCAmelCase = self.normalization(lowerCamelCase )
_UpperCAmelCase = self.activation(lowerCamelCase )
return hidden_state
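# Note: the conv layer above chains explicit zero padding (kernel_size // 2),
# a VALID-padded convolution, batch norm, and the configured activation; the
# manual padding reproduces "SAME"-style behavior for odd kernel sizes while
# keeping the convolution itself padding-free.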
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCamelCase : RegNetConfig , **lowerCamelCase : str ) -> str:
"""simple docstring"""
super().__init__(**lowerCamelCase )
_UpperCAmelCase = config.num_channels
_UpperCAmelCase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )
def lowerCamelCase ( self : str , lowerCamelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = shape_list(lowerCamelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_UpperCAmelCase = tf.transpose(lowerCamelCase , perm=(0, 2, 3, 1) )
_UpperCAmelCase = self.embedder(lowerCamelCase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCamelCase : int , lowerCamelCase : int = 2 , **lowerCamelCase : Any ) -> str:
"""simple docstring"""
super().__init__(**lowerCamelCase )
_UpperCAmelCase = tf.keras.layers.ConvaD(
filters=lowerCamelCase , kernel_size=1 , strides=lowerCamelCase , use_bias=lowerCamelCase , name="""convolution""" )
_UpperCAmelCase = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="""normalization""" )
def lowerCamelCase ( self : int , lowerCamelCase : tf.Tensor , lowerCamelCase : bool = False ) -> tf.Tensor:
"""simple docstring"""
return self.normalization(self.convolution(lowerCamelCase ) , training=lowerCamelCase )
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[Any] , lowerCamelCase : int , lowerCamelCase : int , **lowerCamelCase : Tuple ) -> List[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
_UpperCAmelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase , name="""pooler""" )
_UpperCAmelCase = [
tf.keras.layers.ConvaD(filters=lowerCamelCase , kernel_size=1 , activation="""relu""" , name="""attention.0""" ),
tf.keras.layers.ConvaD(filters=lowerCamelCase , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ),
]
def lowerCamelCase ( self : str , lowerCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
_UpperCAmelCase = self.pooler(lowerCamelCase )
for layer_module in self.attention:
_UpperCAmelCase = layer_module(lowerCamelCase )
_UpperCAmelCase = hidden_state * pooled
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Dict , lowerCamelCase : RegNetConfig , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int = 1 , **lowerCamelCase : Dict ) -> Optional[int]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
_UpperCAmelCase = in_channels != out_channels or stride != 1
_UpperCAmelCase = max(1 , out_channels // config.groups_width )
_UpperCAmelCase = (
TFRegNetShortCut(lowerCamelCase , stride=lowerCamelCase , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_UpperCAmelCase = [
TFRegNetConvLayer(lowerCamelCase , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
lowerCamelCase , stride=lowerCamelCase , groups=lowerCamelCase , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetConvLayer(lowerCamelCase , kernel_size=1 , activation=lowerCamelCase , name="""layer.2""" ),
]
_UpperCAmelCase = ACTaFN[config.hidden_act]
def lowerCamelCase ( self : Dict , lowerCamelCase : str ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = hidden_state
for layer_module in self.layers:
_UpperCAmelCase = layer_module(lowerCamelCase )
_UpperCAmelCase = self.shortcut(lowerCamelCase )
hidden_state += residual
_UpperCAmelCase = self.activation(lowerCamelCase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] , lowerCamelCase : RegNetConfig , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int = 1 , **lowerCamelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
_UpperCAmelCase = in_channels != out_channels or stride != 1
_UpperCAmelCase = max(1 , out_channels // config.groups_width )
_UpperCAmelCase = (
TFRegNetShortCut(lowerCamelCase , stride=lowerCamelCase , name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
)
_UpperCAmelCase = [
TFRegNetConvLayer(lowerCamelCase , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
TFRegNetConvLayer(
lowerCamelCase , stride=lowerCamelCase , groups=lowerCamelCase , activation=config.hidden_act , name="""layer.1""" ),
TFRegNetSELayer(lowerCamelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ),
TFRegNetConvLayer(lowerCamelCase , kernel_size=1 , activation=lowerCamelCase , name="""layer.3""" ),
]
_UpperCAmelCase = ACTaFN[config.hidden_act]
def lowerCamelCase ( self : Dict , lowerCamelCase : Tuple ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = hidden_state
for layer_module in self.layers:
_UpperCAmelCase = layer_module(lowerCamelCase )
_UpperCAmelCase = self.shortcut(lowerCamelCase )
hidden_state += residual
_UpperCAmelCase = self.activation(lowerCamelCase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Tuple , lowerCamelCase : RegNetConfig , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int = 2 , lowerCamelCase : int = 2 , **lowerCamelCase : Dict ) -> Dict:
"""simple docstring"""
super().__init__(**lowerCamelCase )
_UpperCAmelCase = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
_UpperCAmelCase = [
# downsampling is done in the first layer with stride of 2
layer(lowerCamelCase , lowerCamelCase , lowerCamelCase , stride=lowerCamelCase , name="""layers.0""" ),
*[layer(lowerCamelCase , lowerCamelCase , lowerCamelCase , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def lowerCamelCase ( self : List[str] , lowerCamelCase : Tuple ) -> int:
"""simple docstring"""
for layer_module in self.layers:
_UpperCAmelCase = layer_module(lowerCamelCase )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] , lowerCamelCase : RegNetConfig , **lowerCamelCase : Tuple ) -> Any:
"""simple docstring"""
super().__init__(**lowerCamelCase )
_UpperCAmelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowerCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) )
_UpperCAmelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCamelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowerCamelCase , lowerCamelCase , lowerCamelCase , depth=lowerCamelCase , name=f"""stages.{i+1}""" ) )
def lowerCamelCase ( self : Optional[int] , lowerCamelCase : tf.Tensor , lowerCamelCase : bool = False , lowerCamelCase : bool = True ) -> TFBaseModelOutputWithNoAttention:
"""simple docstring"""
_UpperCAmelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCAmelCase = hidden_states + (hidden_state,)
_UpperCAmelCase = stage_module(lowerCamelCase )
if output_hidden_states:
_UpperCAmelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase , hidden_states=lowerCamelCase )
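# Note: the encoder above appends the running hidden state before each stage
# and once more after the loop, so with N stages the `hidden_states` tuple
# holds N + 1 tensors when `output_hidden_states=True`.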
@keras_serializable
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
'''simple docstring'''
_lowerCamelCase = RegNetConfig
def __init__( self : int , lowerCamelCase : Dict , **lowerCamelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
_UpperCAmelCase = config
_UpperCAmelCase = TFRegNetEmbeddings(lowerCamelCase , name="""embedder""" )
_UpperCAmelCase = TFRegNetEncoder(lowerCamelCase , name="""encoder""" )
_UpperCAmelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase , name="""pooler""" )
@unpack_inputs
def lowerCamelCase ( self : int , lowerCamelCase : tf.Tensor , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
_UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = self.embedder(lowerCamelCase , training=lowerCamelCase )
_UpperCAmelCase = self.encoder(
lowerCamelCase , output_hidden_states=lowerCamelCase , return_dict=lowerCamelCase , training=lowerCamelCase )
_UpperCAmelCase = encoder_outputs[0]
_UpperCAmelCase = self.pooler(lowerCamelCase )
# Change to NCHW output format have uniformity in the modules
_UpperCAmelCase = tf.transpose(lowerCamelCase , perm=(0, 3, 1, 2) )
_UpperCAmelCase = tf.transpose(lowerCamelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_UpperCAmelCase = tuple([tf.transpose(lowerCamelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase , pooler_output=lowerCamelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = RegNetConfig
_lowerCamelCase = '''regnet'''
_lowerCamelCase = '''pixel_values'''
@property
def lowerCamelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
__a: Dict = R'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
__a: Union[str, Any] = R'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , UpperCAmelCase , )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : str , lowerCamelCase : RegNetConfig , *lowerCamelCase : int , **lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
_UpperCAmelCase = TFRegNetMainLayer(lowerCamelCase , name="""regnet""" )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCamelCase ( self : List[str] , lowerCamelCase : tf.Tensor , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Tuple=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
"""simple docstring"""
_UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = self.regnet(
pixel_values=lowerCamelCase , output_hidden_states=lowerCamelCase , return_dict=lowerCamelCase , training=lowerCamelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , UpperCAmelCase , )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , lowerCamelCase : RegNetConfig , *lowerCamelCase : Tuple , **lowerCamelCase : List[Any] ) -> str:
"""simple docstring"""
super().__init__(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
_UpperCAmelCase = config.num_labels
_UpperCAmelCase = TFRegNetMainLayer(lowerCamelCase , name="""regnet""" )
# classification head
_UpperCAmelCase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCamelCase ( self : List[Any] , lowerCamelCase : tf.Tensor = None , lowerCamelCase : tf.Tensor = None , lowerCamelCase : bool = None , lowerCamelCase : bool = None , lowerCamelCase : Optional[Any]=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
"""simple docstring"""
_UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = self.regnet(
lowerCamelCase , output_hidden_states=lowerCamelCase , return_dict=lowerCamelCase , training=lowerCamelCase )
_UpperCAmelCase = outputs.pooler_output if return_dict else outputs[1]
_UpperCAmelCase = self.classifier[0](lowerCamelCase )
_UpperCAmelCase = self.classifier[1](lowerCamelCase )
_UpperCAmelCase = None if labels is None else self.hf_compute_loss(labels=lowerCamelCase , logits=lowerCamelCase )
if not return_dict:
_UpperCAmelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowerCamelCase , logits=lowerCamelCase , hidden_states=outputs.hidden_states ) | 402 | 0 |
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
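# Worked check: for n = 10, (1 + 2 + ... + 10)^2 = 55^2 = 3025 and
# 1^2 + 2^2 + ... + 10^2 = 385, so solution(10) == 2640.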
if __name__ == "__main__":
print(f"{solution() = }")
| 297 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Union[str, Any] ):
# A mock response for an HTTP head request to emulate server down
snake_case__ : List[str] = mock.Mock()
snake_case__ : Optional[int] = 5_0_0
snake_case__ : int = {}
snake_case__ : List[Any] = HTTPError
snake_case__ : List[Any] = {}
# Download this model to make sure it's in the cache.
snake_case__ : List[Any] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=__A ) as mock_head:
snake_case__ : List[Any] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _lowercase ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
snake_case__ : Optional[Any] = mock.Mock()
snake_case__ : str = 5_0_0
snake_case__ : Union[str, Any] = {}
snake_case__ : Optional[int] = HTTPError
snake_case__ : List[str] = {}
# Download this model to make sure it's in the cache.
snake_case__ : Dict = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=__A ) as mock_head:
snake_case__ : str = GPTaTokenizerFast.from_pretrained("gpt2" )
# This check we did call the fake head request
mock_head.assert_called()
def _lowercase ( self : Tuple ):
# This test is for deprecated behavior and can be removed in v5
try:
snake_case__ : int = tempfile.mktemp()
with open(__A , "wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , __A )
snake_case__ : Optional[int] = AlbertTokenizer.from_pretrained(__A )
finally:
os.remove(__A )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" , "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , __A )
snake_case__ : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_0_0_0 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def _lowercase ( self : Tuple ):
# This test is for deprecated behavior and can be removed in v5
snake_case__ : int = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
a_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def _lowercase ( cls : str ):
snake_case__ : Union[str, Any] = TOKEN
HfFolder.save_token(__A )
@classmethod
def _lowercase ( cls : str ):
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def _lowercase ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Union[str, Any] = os.path.join(__A , "vocab.txt" )
with open(__A , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ : int = BertTokenizer(__A )
tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
snake_case__ : Any = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__A , repo_id="test-tokenizer" , push_to_hub=__A , use_auth_token=self._token )
snake_case__ : Any = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def _lowercase ( self : Dict ):
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Any = os.path.join(__A , "vocab.txt" )
with open(__A , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ : int = BertTokenizer(__A )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
snake_case__ : Union[str, Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
__A , repo_id="valid_org/test-tokenizer-org" , push_to_hub=__A , use_auth_token=self._token )
snake_case__ : int = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def _lowercase ( self : List[str] ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : List[Any] = os.path.join(__A , "vocab.txt" )
with open(__A , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ : Optional[int] = CustomTokenizer(__A )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
snake_case__ : Tuple = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Any = os.path.join(__A , "vocab.txt" )
with open(__A , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ : Optional[Any] = BertTokenizerFast.from_pretrained(__A )
bert_tokenizer.save_pretrained(__A )
snake_case__ : Union[str, Any] = CustomTokenizerFast.from_pretrained(__A )
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
snake_case__ : int = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''' , use_fast=__A , trust_remote_code=__A )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Any ):
        trie = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def _lowercase ( self : Union[str, Any] ):
        trie = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
def _lowercase ( self : Union[str, Any] ):
        trie = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
def _lowercase ( self : List[str] ):
        trie = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def _lowercase ( self : Dict ):
        trie = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def _lowercase ( self : List[str] ):
        trie = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
def _lowercase ( self : Optional[Any] ):
        trie = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
def _lowercase ( self : Optional[int] ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
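    # Note: `Trie.split` prefers the longest token added at each position, so
    # after add("AB") and add("B") the string "ABC" splits as ["AB", "C"];
    # the inner "B" is never matched on its own.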
| 297 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])
    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)
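# Dispatch note: each (sub)parser attaches its handler via
# `set_defaults(func=...)`; if parsing produced no `func`, main() above
# prints help and exits with status 1.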
if __name__ == "__main__":
main()
| 555 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
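# For example, text_to_html_table([["Step", "Loss"], [1, 0.25]]) renders a
# table with "Step"/"Loss" headers and one data row; floats are formatted to
# six decimal places.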
class a :
_snake_case = 5
_snake_case = 0.2
def __init__( self : Union[str, Any], SCREAMING_SNAKE_CASE_ : int, SCREAMING_SNAKE_CASE_ : Optional[str] = None, SCREAMING_SNAKE_CASE_ : bool = True, SCREAMING_SNAKE_CASE_ : Optional["NotebookTrainingTracker"] = None, SCREAMING_SNAKE_CASE_ : int = 3_00, ):
snake_case : List[Any] = total
snake_case : Union[str, Any] = '''''' if prefix is None else prefix
snake_case : List[Any] = leave
snake_case : int = parent
snake_case : List[str] = width
snake_case : Optional[int] = None
snake_case : Optional[Any] = None
snake_case : Tuple = None
def __snake_case ( self : Any, SCREAMING_SNAKE_CASE_ : int, SCREAMING_SNAKE_CASE_ : bool = False, SCREAMING_SNAKE_CASE_ : str = None ):
snake_case : List[Any] = value
if comment is not None:
snake_case : Tuple = comment
if self.last_value is None:
snake_case : str = time.time()
snake_case : List[str] = value
snake_case : str = None
snake_case : Dict = self.warmup
snake_case : Tuple = 1
self.update_bar(SCREAMING_SNAKE_CASE_ )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total ):
if self.first_calls > 0:
self.first_calls -= 1
snake_case : Tuple = time.time()
snake_case : str = current_time - self.start_time
# We could have value = self.start_value if the update is called twixe with the same start value.
if value > self.start_value:
snake_case : Any = self.elapsed_time / (value - self.start_value)
else:
snake_case : List[str] = None
if value >= self.total:
snake_case : List[Any] = self.total
snake_case : Tuple = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
snake_case : Optional[int] = self.average_time_per_item * (self.total - value)
self.update_bar(SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = value
snake_case : List[Any] = current_time
if self.average_time_per_item is None:
snake_case : List[str] = 1
else:
snake_case : Optional[int] = max(int(self.update_every / self.average_time_per_item ), 1 )
def __snake_case ( self : Any, SCREAMING_SNAKE_CASE_ : List[str], SCREAMING_SNAKE_CASE_ : Optional[Any]=None ):
snake_case : Optional[Any] = ''' ''' * (len(str(self.total ) ) - len(str(SCREAMING_SNAKE_CASE_ ) )) + str(SCREAMING_SNAKE_CASE_ )
if self.elapsed_time is None:
snake_case : int = F"""[{spaced_value}/{self.total} : < :"""
elif self.predicted_remaining is None:
snake_case : List[str] = F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"""
else:
snake_case : Optional[int] = (
F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"""
F""" {format_time(self.predicted_remaining )}"""
)
self.label += F""", {1/self.average_time_per_item:.2f} it/s"""
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F""", {self.comment}]"""
self.display()
def __snake_case ( self : Optional[int] ):
snake_case : str = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
snake_case : Any = disp.display(disp.HTML(self.html_code ), display_id=SCREAMING_SNAKE_CASE_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def __snake_case ( self : Optional[int] ):
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class a ( __magic_name__ ):
def __init__( self : List[str], SCREAMING_SNAKE_CASE_ : List[str], SCREAMING_SNAKE_CASE_ : Optional[int]=None ):
super().__init__(SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = None if column_names is None else [column_names]
snake_case : str = None
def __snake_case ( self : Dict ):
snake_case : Tuple = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
snake_case : List[Any] = disp.display(disp.HTML(self.html_code ), display_id=SCREAMING_SNAKE_CASE_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def __snake_case ( self : List[str], SCREAMING_SNAKE_CASE_ : Any ):
if self.inner_table is None:
snake_case : Optional[Any] = [list(values.keys() ), list(values.values() )]
else:
snake_case : Tuple = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(SCREAMING_SNAKE_CASE_ )
snake_case : int = columns
self.inner_table.append([values[c] for c in columns] )
def __snake_case ( self : Tuple, SCREAMING_SNAKE_CASE_ : Optional[Any], SCREAMING_SNAKE_CASE_ : int=None, SCREAMING_SNAKE_CASE_ : str=3_00 ):
snake_case : int = NotebookProgressBar(SCREAMING_SNAKE_CASE_, prefix=SCREAMING_SNAKE_CASE_, parent=self, width=SCREAMING_SNAKE_CASE_ )
return self.child_bar
def __snake_case ( self : Union[str, Any] ):
snake_case : int = None
self.display()
class a ( __magic_name__ ):
def __init__( self : Optional[Any] ):
snake_case : Dict = None
snake_case : Union[str, Any] = None
snake_case : Dict = False
def __snake_case ( self : List[str], SCREAMING_SNAKE_CASE_ : Any, SCREAMING_SNAKE_CASE_ : int, SCREAMING_SNAKE_CASE_ : str, **SCREAMING_SNAKE_CASE_ : List[str] ):
snake_case : str = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
snake_case : List[str] = 0
snake_case : Dict = 0
snake_case : List[Any] = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
snake_case : Tuple = NotebookTrainingTracker(state.max_steps, SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Tuple, SCREAMING_SNAKE_CASE_ : List[Any], SCREAMING_SNAKE_CASE_ : Dict, SCREAMING_SNAKE_CASE_ : str, **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
snake_case : Tuple = int(state.epoch ) if int(state.epoch ) == state.epoch else F"""{state.epoch:.2f}"""
self.training_tracker.update(
state.global_step + 1, comment=F"""Epoch {epoch}/{state.num_train_epochs}""", force_update=self._force_next_update, )
snake_case : str = False
def __snake_case ( self : Tuple, SCREAMING_SNAKE_CASE_ : int, SCREAMING_SNAKE_CASE_ : Union[str, Any], SCREAMING_SNAKE_CASE_ : int, SCREAMING_SNAKE_CASE_ : Tuple=None, **SCREAMING_SNAKE_CASE_ : Tuple ):
if not has_length(SCREAMING_SNAKE_CASE_ ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
snake_case : Dict = self.training_tracker.add_child(len(SCREAMING_SNAKE_CASE_ ) )
else:
snake_case : Optional[Any] = NotebookProgressBar(len(SCREAMING_SNAKE_CASE_ ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def __snake_case ( self : Optional[int], SCREAMING_SNAKE_CASE_ : Any, SCREAMING_SNAKE_CASE_ : Dict, SCREAMING_SNAKE_CASE_ : Union[str, Any], **SCREAMING_SNAKE_CASE_ : List[Any] ):
if self.prediction_bar is not None:
self.prediction_bar.close()
snake_case : Optional[int] = None
def __snake_case ( self : Optional[Any], SCREAMING_SNAKE_CASE_ : List[str], SCREAMING_SNAKE_CASE_ : Any, SCREAMING_SNAKE_CASE_ : Optional[int], SCREAMING_SNAKE_CASE_ : Optional[int]=None, **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
snake_case : List[str] = {'''Training Loss''': logs['''loss''']}
# First column is necessarily Step sine we're not in epoch eval strategy
snake_case : List[Any] = state.global_step
self.training_tracker.write_line(SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Any, SCREAMING_SNAKE_CASE_ : Optional[int], SCREAMING_SNAKE_CASE_ : List[str], SCREAMING_SNAKE_CASE_ : List[str], SCREAMING_SNAKE_CASE_ : Optional[Any]=None, **SCREAMING_SNAKE_CASE_ : Any ):
if self.training_tracker is not None:
snake_case : List[Any] = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
snake_case : Union[str, Any] = log['''loss''']
break
if self.first_column == "Epoch":
snake_case : Optional[int] = int(state.epoch )
else:
snake_case : Optional[int] = state.global_step
snake_case : Optional[int] = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
snake_case : str = re.sub(R'''\_loss$''', '''''', SCREAMING_SNAKE_CASE_ )
snake_case : str = metrics.pop('''total_flos''', SCREAMING_SNAKE_CASE_ )
snake_case : int = metrics.pop('''epoch''', SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = metrics.pop(F"""{metric_key_prefix}_runtime""", SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = metrics.pop(F"""{metric_key_prefix}_samples_per_second""", SCREAMING_SNAKE_CASE_ )
snake_case : Any = metrics.pop(F"""{metric_key_prefix}_steps_per_second""", SCREAMING_SNAKE_CASE_ )
snake_case : Dict = metrics.pop(F"""{metric_key_prefix}_jit_compilation_time""", SCREAMING_SNAKE_CASE_ )
for k, v in metrics.items():
if k == F"""{metric_key_prefix}_loss""":
snake_case : Optional[int] = v
else:
snake_case : int = k.split('''_''' )
snake_case : Any = ''' '''.join([part.capitalize() for part in splits[1:]] )
snake_case : Tuple = v
self.training_tracker.write_line(SCREAMING_SNAKE_CASE_ )
self.training_tracker.remove_child()
snake_case : Any = None
# Evaluation takes a long time so we should force the next update.
snake_case : Optional[int] = True
def __snake_case ( self : List[str], SCREAMING_SNAKE_CASE_ : List[Any], SCREAMING_SNAKE_CASE_ : List[str], SCREAMING_SNAKE_CASE_ : Dict, **SCREAMING_SNAKE_CASE_ : str ):
self.training_tracker.update(
state.global_step, comment=F"""Epoch {int(state.epoch )}/{state.num_train_epochs}""", force_update=SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = None
| 555 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case_ ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
A_ = KandinskyVaaInpaintPipeline
A_ = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
A_ = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
A_ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
A_ = False
@property
def UpperCAmelCase__ ( self) -> Union[str, Any]:
return 3_2
@property
def UpperCAmelCase__ ( self) -> Optional[Any]:
return 3_2
@property
def UpperCAmelCase__ ( self) -> Tuple:
return self.time_input_dim
@property
def UpperCAmelCase__ ( self) -> Dict:
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self) -> int:
return 1_0_0
@property
def UpperCAmelCase__ ( self) -> Optional[int]:
torch.manual_seed(0)
UpperCamelCase = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCamelCase = UNetaDConditionModel(**lowerCamelCase_)
return model
@property
def UpperCAmelCase__ ( self) -> Optional[Any]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase__ ( self) -> List[Any]:
torch.manual_seed(0)
UpperCamelCase = VQModel(**self.dummy_movq_kwargs)
return model
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = self.dummy_unet
UpperCamelCase = self.dummy_movq
UpperCamelCase = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=lowerCamelCase_ , )
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> List[str]:
UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
lowerCamelCase_)
# create init_image
UpperCamelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCamelCase = Image.fromarray(np.uinta(lowerCamelCase_)).convert('''RGB''').resize((2_5_6, 2_5_6))
# create mask
UpperCamelCase = np.ones((6_4, 6_4) , dtype=np.floataa)
UpperCamelCase = 0
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = '''cpu'''
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = self.pipeline_class(**lowerCamelCase_)
UpperCamelCase = pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase_))
UpperCamelCase = output.images
UpperCamelCase = pipe(
**self.get_dummy_inputs(lowerCamelCase_) , return_dict=lowerCamelCase_ , )[0]
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}')
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def UpperCAmelCase__ ( self) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''')
UpperCamelCase = np.ones((7_6_8, 7_6_8) , dtype=np.floataa)
UpperCamelCase = 0
UpperCamelCase = '''a hat'''
UpperCamelCase = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa)
pipe_prior.to(lowerCamelCase_)
UpperCamelCase = KandinskyVaaInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder-inpaint''' , torch_dtype=torch.floataa)
UpperCamelCase = pipeline.to(lowerCamelCase_)
pipeline.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = torch.Generator(device='''cpu''').manual_seed(0)
UpperCamelCase , UpperCamelCase = pipe_prior(
lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
UpperCamelCase = pipeline(
image=lowerCamelCase_ , mask_image=lowerCamelCase_ , image_embeds=lowerCamelCase_ , negative_image_embeds=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_) | 34 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 532 | 0 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class a__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_pads_short_sequence(self):
        sequence = [1, 2, 3, 4]
        expected = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected)

    def test_fit_to_block_sequence_fits_exactly(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected)

    def test_fit_to_block_truncates_long_sequence(self):
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected)

    def test_process_story_no_highlights(self):
        raw_story = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        raw_story = ''
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_highlights(self):
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(story_lines, expected_story_lines)
        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(summary_lines, expected_summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
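    # Note: compute_token_type_ids flips the segment id at every separator
    # token (the separator itself takes the new segment's id), which is why
    # the third row above reads [1, 0, 0, 0, 1, 1].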
| 714 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314 | 0 |
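Running the function on the sample graphs above is a useful sanity check: the best route from E to F is E→G→F with cost 2 + 1 = 3, beating E→B→C→D→F at cost 4, and a query with no connecting path returns the -1 sentinel.

print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3
print(bidirectional_dij("F", "E", graph_fwd, graph_bwd))  # -1: F has no outgoing edges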
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device)
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images
        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 54 |
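For reference, a hedged sketch of how the pipeline exercised by these tests is used directly; the checkpoint id, image URL, and call signature are taken from the integration tests above, and a CUDA device is assumed:

import torch
from diffusers import AltDiffusionImg2ImgPipeline
from diffusers.utils import load_image

pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
pipe = pipe.to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/img2img/sketch-mountains-input.jpg"
).resize((768, 512))

generator = torch.manual_seed(0)
image = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image,
    strength=0.75,
    guidance_scale=7.5,
    generator=generator,
).images[0]
image.save("fantasy_landscape.png")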
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)
    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )
        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }
    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()
    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
| 5 | 0 |
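A quick sanity check on `compute_intermediate_size`: the rounded-up FFN sizes it produces from the hidden sizes of the official checkpoints should reproduce the `INTERMEDIATE_SIZE_MAP` table at the top of the script (hidden sizes 4096/5120/8192 are the published 7B/13B/65B dimensions):

# int(8 * 4096 / 3) = 10922, rounded up to the next multiple of 256 -> 11008
assert compute_intermediate_size(4096) == 11008  # 7B
assert compute_intermediate_size(5120) == 13824  # 13B
assert compute_intermediate_size(8192) == 22016  # 65B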
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol]) / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 620 |
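A small usage example for the class above. Coefficients are listed lowest degree first, so multiplying 1 + 2x + 3x² by 4 + 5x + 6x² should give 4 + 13x + 28x² + 27x³ + 18x⁴; the rounded results come back as complex numbers with (near-)zero imaginary parts:

product = FFT([1, 2, 3], [4, 5, 6])
print(product.product)  # [(4+0j), (13+0j), (28+0j), (27+0j), (18+0j)]
print(product)          # pretty-printed A, B and A*B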
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursively compute the maximum value obtainable within the weight limit."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # Either skip the current item...
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # ...or take it, if it still fits.
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 620 | 1 |
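For example, with capacity 7 the best subset below is the items of weight 3 and 4, worth 4 + 5 = 9 (taking the weight-5 item instead caps the value at 8):

weights = [1, 3, 4, 5]
values = [1, 4, 5, 7]
print(knapsack(weights, values, len(weights), 7, 0))  # 9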
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e
        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")
        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)
        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))
        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
| 366 |
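In everyday use this class is reached through the `pipeline` factory rather than instantiated directly; a typical invocation (the NLI checkpoint shown is a common choice, not the only option) looks like:

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "one day I will see the world",
    candidate_labels=["travel", "cooking", "dancing"],
)
print(result["labels"][0], round(result["scores"][0], 3))  # highest-scoring label first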
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 366 | 1 |
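The image variant is used the same way through the `pipeline` factory; a sketch with a CLIP checkpoint (one common backbone for this task) and a public test image:

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["a photo of a cat", "a photo of a dog"],
)
print(preds[0]["label"], round(preds[0]["score"], 3))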
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args):
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()
            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))
        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 239 |
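The command is normally run as `transformers-cli env`, but `format_dict` is a plain static helper, so its output format is easy to check in isolation:

print(EnvironmentCommand.format_dict({"Platform": "Linux", "Python version": "3.10.12"}))
# - Platform: Linux
# - Python version: 3.10.12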
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, pixel_values, labels
    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
    def setUp(self):
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values
        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)
        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
| 239 | 1 |
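A hedged sketch of the same inference path outside the test harness, mirroring test_inference_image_classification_head_imagenet_1k above (the checkpoint and fixture path are the ones the tests use):

import numpy as np
from PIL import Image
from transformers import BeitImageProcessor, FlaxBeitForImageClassification

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="np")
logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1)[0])])  # e.g. "tabby, tabby cat" (class 281)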