"""simple docstring"""
class snake_case_:
def __init__( self : str , UpperCamelCase_ : Union[str, Any] ):
lowerCAmelCase : List[Any] = val
lowerCAmelCase : Dict = None
lowerCAmelCase : Optional[int] = None
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[str] ):
if self.val:
if val < self.val:
if self.left is None:
lowerCAmelCase : Any = Node(UpperCamelCase_ )
else:
self.left.insert(UpperCamelCase_ )
elif val > self.val:
if self.right is None:
lowerCAmelCase : List[str] = Node(UpperCamelCase_ )
else:
self.right.insert(UpperCamelCase_ )
else:
lowerCAmelCase : Dict = val
def _snake_case ( _snake_case : List[str] , _snake_case : str ):
# Recursive traversal
if root:
inorder(root.left , _snake_case )
res.append(root.val )
inorder(root.right , _snake_case )
def _snake_case ( _snake_case : Tuple ):
# Build BST
if len(_snake_case ) == 0:
return arr
lowerCAmelCase : Any = Node(arr[0] )
for i in range(1 , len(_snake_case ) ):
root.insert(arr[i] )
# Traverse BST in order.
lowerCAmelCase : str = []
inorder(_snake_case , _snake_case )
return res
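

# A stack-based traversal sketch (not part of the original snippet, added for illustration):
# already-sorted input degenerates the BST into a linked list, so the recursive `inorder`
# can hit Python's recursion limit; an explicit stack avoids that.
def inorder_iterative(root):
    res, stack, node = [], [], root
    while stack or node is not None:
        while node is not None:
            stack.append(node)  # descend as far left as possible
            node = node.left
        node = stack.pop()  # visit the leftmost unvisited node
        res.append(node.val)
        node = node.right  # then walk its right subtree
    return res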
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
"""BigBird model configuration."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    """Configuration class storing the hyperparameters of a BigBird model."""

    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
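

if __name__ == "__main__":
    # A minimal usage sketch (illustrative values, not from a released checkpoint):
    # instantiate a config and inspect a few of the block-sparse attention knobs.
    config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
    print(config.max_position_embeddings)  # 4096 by default
    print(config.attention_type)  # "block_sparse"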
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
_a = logging.get_logger(__name__)
_a = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_a = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
_a = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Any = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = RobertaTokenizer
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="replace" , lowercase_="<s>" , lowercase_="</s>" , lowercase_="</s>" , lowercase_="<s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_="<mask>" , lowercase_=False , lowercase_=True , **lowercase_ , ):
"""simple docstring"""
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , errors=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ , **lowercase_ , )
UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowercase_ ) != add_prefix_space:
UpperCAmelCase_ : Union[str, Any] = getattr(lowercase_ , pre_tok_state.pop("type" ) )
UpperCAmelCase_ : Any = add_prefix_space
UpperCAmelCase_ : Tuple = pre_tok_class(**lowercase_ )
UpperCAmelCase_ : List[Any] = add_prefix_space
UpperCAmelCase_ : List[Any] = "post_processor"
UpperCAmelCase_ : Union[str, Any] = getattr(self.backend_tokenizer , lowercase_ , lowercase_ )
if tokenizer_component_instance:
UpperCAmelCase_ : str = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase_ : Any = tuple(state["sep"] )
if "cls" in state:
UpperCAmelCase_ : Optional[int] = tuple(state["cls"] )
UpperCAmelCase_ : List[Any] = False
if state.get("add_prefix_space" , lowercase_ ) != add_prefix_space:
UpperCAmelCase_ : int = add_prefix_space
UpperCAmelCase_ : Optional[int] = True
if state.get("trim_offsets" , lowercase_ ) != trim_offsets:
UpperCAmelCase_ : List[str] = trim_offsets
UpperCAmelCase_ : List[str] = True
if changes_to_apply:
UpperCAmelCase_ : Optional[Any] = getattr(lowercase_ , state.pop("type" ) )
UpperCAmelCase_ : Any = component_class(**lowercase_ )
setattr(self.backend_tokenizer , lowercase_ , lowercase_ )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else value
UpperCAmelCase_ : Optional[Any] = value
def UpperCamelCase__ ( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = kwargs.get("is_split_into_words" , lowercase_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = kwargs.get("is_split_into_words" , lowercase_ )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_=None ):
"""simple docstring"""
UpperCAmelCase_ : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
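

if __name__ == "__main__":
    # A short usage sketch (downloads the `roberta-base` files, so network access is
    # assumed): `add_prefix_space=True` is required for pretokenized input, per the
    # assertions in `_batch_encode_plus` / `_encode_plus` above.
    tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
    enc = tok(["Hello", "world"], is_split_into_words=True)
    print(enc.input_ids)  # token ids wrapped in <s> ... </s>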
"""Projects CLIP embeddings into the extra context tokens and time embeddings used by the unCLIP decoder."""

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
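

if __name__ == "__main__":
    # A shape-check sketch (dimensions are illustrative, not from a released checkpoint):
    # the forward pass produces extra cross-attention tokens plus an additive time embedding.
    model = UnCLIPTextProjModel(
        clip_extra_context_tokens=4, clip_embeddings_dim=768, time_embed_dim=1536, cross_attention_dim=1280
    )
    hidden, time_emb = model(
        image_embeddings=torch.randn(2, 768),
        prompt_embeds=torch.randn(2, 768),
        text_encoder_hidden_states=torch.randn(2, 77, 768),
        do_classifier_free_guidance=False,
    )
    print(hidden.shape)  # torch.Size([2, 81, 1280]): 4 extra tokens + 77 projected text tokens
    print(time_emb.shape)  # torch.Size([2, 1536])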
from typing import List, Optional, Union

import torch

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n    >>> pipe_prior.to("cuda")\n    >>> prompt = "red cat, 4k photo"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n    >>> pipe.to("cuda")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ... ).images\n    >>> image[0].save("cat.png")\n    ```\n'


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class KandinskyV22Pipeline(DiffusionPipeline):
    """Text-to-image pipeline for Kandinsky 2.2, conditioned on CLIP image embeddings from the prior."""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator=None,
        latents=None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
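

if __name__ == "__main__":
    # A quick check of the rounding helper above (pure Python, safe to run standalone):
    # sizes are rounded up to a whole number of latent blocks before being re-expressed
    # in latent-space units.
    print(downscale_height_and_width(768, 768, scale_factor=8))  # (96, 96)
    print(downscale_height_and_width(500, 500, scale_factor=8))  # (64, 64): 500 rounds up to 8 blocks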
"""PyTorch Lightning callbacks for seq2seq fine-tuning: logging, checkpointing, and early stopping."""

import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to"
            " this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)

        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A_ = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
A_ = {
'''gpt2''': 10_24,
'''gpt2-medium''': 10_24,
'''gpt2-large''': 10_24,
'''gpt2-xl''': 10_24,
'''distilgpt2''': 10_24,
}
class lowercase( __a ):
'''simple docstring'''
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["input_ids", "attention_mask"]
lowercase__ = GPTaTokenizer
def __init__( self: Any, a_: Optional[Any]=None, a_: Tuple=None, a_: Optional[int]=None, a_: Dict="<|endoftext|>", a_: Tuple="<|endoftext|>", a_: Optional[Any]="<|endoftext|>", a_: List[str]=False, **a_: Any, ):
'''simple docstring'''
super().__init__(
a_, a_, tokenizer_file=a_, unk_token=a_, bos_token=a_, eos_token=a_, add_prefix_space=a_, **a_, )
_snake_case : List[Any] = kwargs.pop("""add_bos_token""", a_ )
_snake_case : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""", a_ ) != add_prefix_space:
_snake_case : Any = getattr(a_, pre_tok_state.pop("""type""" ) )
_snake_case : Union[str, Any] = add_prefix_space
_snake_case : Optional[int] = pre_tok_class(**a_ )
_snake_case : Any = add_prefix_space
def UpperCamelCase_ ( self: Tuple, *a_: List[str], **a_: Tuple ):
'''simple docstring'''
_snake_case : Optional[Any] = kwargs.get("""is_split_into_words""", a_ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*a_, **a_ )
def UpperCamelCase_ ( self: Optional[Any], *a_: Optional[Any], **a_: Dict ):
'''simple docstring'''
_snake_case : Dict = kwargs.get("""is_split_into_words""", a_ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*a_, **a_ )
def UpperCamelCase_ ( self: List[Any], a_: str, a_: Optional[str] = None ):
'''simple docstring'''
_snake_case : Any = self._tokenizer.model.save(a_, name=a_ )
return tuple(a_ )
def UpperCamelCase_ ( self: int, a_: "Conversation" ):
'''simple docstring'''
_snake_case : Any = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a_, add_special_tokens=a_ ) + [self.eos_token_id] )
if len(a_ ) > self.model_max_length:
_snake_case : List[Any] = input_ids[-self.model_max_length :]
return input_ids
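

if __name__ == "__main__":
    # A usage sketch (downloads the `gpt2` files, so network access is assumed): appending
    # the EOS token after each turn mirrors `_build_conversation_input_ids` above.
    tok = GPT2TokenizerFast.from_pretrained("gpt2")
    ids = tok.encode("Hello there") + [tok.eos_token_id]
    print(tok.decode(ids))  # Hello there<|endoftext|>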
"""A minimal two-cluster self-organizing map trained with competitive learning."""

import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning (closest of the two) weight vector for a sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 < d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Pull the winning weight vector toward the sample."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training examples (m, n)
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # Weight initialization (n, C)
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # Training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
import torch
from torch import nn


class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive softmax with optional projections, as used by Transformer-XL."""

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        """Computes log probabilities for all tokens in the vocabulary."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
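

if __name__ == "__main__":
    # A smoke-test sketch (vocabulary size and cutoffs are illustrative): with labels the
    # module returns per-token negative log-likelihoods; without labels, `log_prob` returns
    # full log-probabilities over the vocabulary.
    crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[100, 500], div_val=1)
    hidden = torch.randn(4, 7, 64)  # [bsz, seq_len, d_proj]
    labels = (torch.arange(4 * 7).view(4, 7) * 37) % 1000  # deterministic labels covering all clusters
    nll = crit(hidden, labels)  # labels are shifted by one position internally
    print(nll.shape)  # torch.Size([24]): bsz * (seq_len - 1), flattened
    print(crit.log_prob(hidden.view(-1, 64)).shape)  # torch.Size([28, 1000])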
"""Image/Text processor class for OWL-ViT."""

import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
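

if __name__ == "__main__":
    # A usage sketch (downloads the `google/owlvit-base-patch32` files, so network access
    # is assumed): nested text queries are padded to the same per-image query count, as
    # implemented in `__call__` above.
    from PIL import Image

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
    inputs = processor(text=[["a cat", "a dog"], ["a bird"]], images=[image, image], return_tensors="pt")
    print(inputs.input_ids.shape)  # two query lists padded to 2 queries each -> 4 rows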
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__a = logging.getLogger(__name__)
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
return (preds == labels).mean()
@dataclass
class lowerCamelCase :
'''simple docstring'''
_A : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_A : Optional[str] = field(
default=_lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_A : Optional[str] = field(
default=_lowerCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_A : Optional[str] = field(
default=_lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class lowerCamelCase :
'''simple docstring'''
_A : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
_A : str = field(metadata={"""help""": """Should contain the data files for the task."""} )
_A : int = field(
default=1_2_8 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_A : bool = field(
default=_lowerCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def A_ ( ):
'''simple docstring'''
snake_case_ :List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case_, snake_case_, snake_case_ :List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""", _lowercase )
# Set seed
set_seed(training_args.seed )
try:
snake_case_ :Optional[Any] = processors[data_args.task_name]()
snake_case_ :Tuple = processor.get_labels()
snake_case_ :int = len(_lowercase )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ :List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=_lowercase, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
snake_case_ :Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
snake_case_ :Union[str, Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path ), config=_lowercase, cache_dir=model_args.cache_dir, )
# Get datasets
snake_case_ :List[str] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=_lowercase, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
if training_args.do_train
else None
)
snake_case_ :Optional[Any] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=_lowercase, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
if training_args.do_eval
else None
)
def compute_metrics(_lowercase ) -> Dict:
snake_case_ :Union[str, Any] = np.argmax(p.predictions, axis=1 )
return {"acc": simple_accuracy(_lowercase, p.label_ids )}
# Data collator
snake_case_ :List[str] = DataCollatorWithPadding(_lowercase, pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
snake_case_ :Optional[Any] = Trainer(
model=_lowercase, args=_lowercase, train_dataset=_lowercase, eval_dataset=_lowercase, compute_metrics=_lowercase, data_collator=_lowercase, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case_ :Optional[int] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
snake_case_ :List[str] = trainer.evaluate()
snake_case_ :Union[str, Any] = os.path.join(training_args.output_dir, """eval_results.txt""" )
if trainer.is_world_master():
with open(_lowercase, """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""", _lowercase, _lowercase )
writer.write("""%s = %s\n""" % (key, value) )
results.update(_lowercase )
return results
def A_ ( _lowercase ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
def harmonic_series(n_term: str) -> list:
    """Return the first ``n_term`` terms of the harmonic series as strings.

    >>> harmonic_series("3")
    ['1', '1/2', '1/3']
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
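

# A companion sketch (not in the original snippet): the same terms summed exactly.
from fractions import Fraction


def harmonic_sum(n: int) -> Fraction:
    """Exact partial sum H_n = 1 + 1/2 + ... + 1/n.

    >>> harmonic_sum(4)
    Fraction(25, 12)
    """
    return sum(Fraction(1, k) for k in range(1, n + 1))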
if __name__ == "__main__":
lowerCAmelCase_ : Union[str, Any] = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
"""Tokenization class for Speech2Text2."""

import json
import os
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """Return the set of symbol pairs in a word. `word` is a tuple of symbols (variable-length strings)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}


class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )

        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]

            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)

        word = " ".join(word)
        if word == "\n  " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string by splitting on whitespace and applying BPE to each word."""
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an index (integer) using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a list of output tokens into a single string."""
        # combine tokens
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
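

if __name__ == "__main__":
    # A quick illustration of the BPE helpers above (pure Python, no vocab files needed):
    # `get_pairs` enumerates adjacent symbol pairs, which `bpe` repeatedly merges by rank.
    word = tuple("low") + ("e" + BPE_TOKEN_MERGES,)  # ('l', 'o', 'w', 'e</w>')
    print(get_pairs(word))  # {('l', 'o'), ('o', 'w'), ('w', 'e</w>')} (set order varies)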
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
lowerCAmelCase_ : List[str] = logging.getLogger(__name__)
lowerCAmelCase_ : List[Any] = tf.data.AUTOTUNE
def _lowerCamelCase ( ) -> Optional[int]:
_a = argparse.ArgumentParser(description="Train a masked language model on TPU." )
parser.add_argument(
"--pretrained_model_config" , type=lowercase , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , )
parser.add_argument(
"--tokenizer" , type=lowercase , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , )
parser.add_argument(
"--per_replica_batch_size" , type=lowercase , default=8 , help="Batch size per TPU core." , )
parser.add_argument(
"--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , )
parser.add_argument(
"--tpu_name" , type=lowercase , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , )
parser.add_argument(
"--tpu_zone" , type=lowercase , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , )
parser.add_argument(
"--gcp_project" , type=lowercase , help="Google cloud project name. Only used for non-Colab TPU nodes." )
parser.add_argument(
"--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , )
parser.add_argument(
"--train_dataset" , type=lowercase , help="Path to training dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--shuffle_buffer_size" , type=lowercase , default=2**18 , help="Size of the shuffle buffer (in samples)" , )
parser.add_argument(
"--eval_dataset" , type=lowercase , help="Path to evaluation dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--num_epochs" , type=lowercase , default=1 , help="Number of epochs to train for." , )
parser.add_argument(
"--learning_rate" , type=lowercase , default=1E-4 , help="Learning rate to use for training." , )
parser.add_argument(
"--weight_decay_rate" , type=lowercase , default=1E-3 , help="Weight decay rate to use for training." , )
parser.add_argument(
"--max_length" , type=lowercase , default=512 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , )
parser.add_argument(
"--mlm_probability" , type=lowercase , default=0.15 , help="Fraction of tokens to mask during training." , )
parser.add_argument("--output_dir" , type=lowercase , required=lowercase , help="Path to save model checkpoints to." )
parser.add_argument("--hub_model_id" , type=lowercase , help="Model ID to upload to on the Hugging Face Hub." )
args = parser.parse_args()
return args
def initialize_tpu(args ):
try:
if args.tpu_name:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
"--gcp_project. When running on a TPU VM, use --tpu_name local." )
tf.config.experimental_connect_to_cluster(tpu )
tf.tpu.experimental.initialize_tpu_system(tpu )
return tpu
def count_samples(file_list ) -> int:
num_samples = 0
for file in file_list:
filename = file.split("/" )[-1]
sample_count = re.search(r"-\d+-(\d+)\.tfrecord" , filename ).group(1 )
sample_count = int(sample_count )
num_samples += sample_count
return num_samples
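# Editor's sketch, not part of the original script: the regex above assumes shard names of the
# form "<name>-<shard>-<num_samples>.tfrecord"; the bucket path below is illustrative only.
# count_samples(["gs://my-bucket/train/wikitext-0-5000.tfrecord"])  # -> 5000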
def prepare_dataset(records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
num_samples = count_samples(records )
dataset = tf.data.Dataset.from_tensor_slices(records )
if shuffle:
dataset = dataset.shuffle(len(dataset ) )
dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTOTUNE )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
dataset = dataset.map(decode_fn , num_parallel_calls=AUTOTUNE )
if shuffle:
assert shuffle_buffer_size is not None
dataset = dataset.shuffle(shuffle_buffer_size )
dataset = dataset.batch(batch_size , drop_remainder=True )
dataset = dataset.map(mask_fn , num_parallel_calls=AUTOTUNE )
dataset = dataset.prefetch(AUTOTUNE )
return dataset
def main(args ):
if not args.no_tpu:
tpu = initialize_tpu(args )
strategy = tf.distribute.TPUStrategy(tpu )
else:
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0" )
if args.bfloat16:
tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
config = AutoConfig.from_pretrained(args.pretrained_model_config )
config.vocab_size = tokenizer.vocab_size
training_records = tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) )
if not training_records:
raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' )
eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) )
if not eval_records:
raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' )
num_train_samples = count_samples(training_records )
steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
total_train_steps = steps_per_epoch * args.num_epochs
with strategy.scope():
model = TFAutoModelForMaskedLM.from_config(config )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
optimizer , schedule = create_optimizer(
num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=optimizer , metrics=["accuracy"] )
def decode_fn(example ):
features = {
"input_ids": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
"attention_mask": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(example , features )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors="tf" )
def mask_with_collator(batch ):
# TF really needs an isin() function
special_tokens_mask = (
~tf.cast(batch["attention_mask"] , tf.bool )
| (batch["input_ids"] == tokenizer.cls_token_id)
| (batch["input_ids"] == tokenizer.sep_token_id)
)
batch["input_ids"] , batch["labels"] = data_collator.tf_mask_tokens(
batch["input_ids"] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
return batch
batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
train_dataset = prepare_dataset(
training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
eval_dataset = prepare_dataset(
eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
callbacks = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
model.fit(
train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
args = parse_args()
main(args)
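# Editor's sketch, not part of the original script: a hypothetical invocation. The script
# name and bucket paths are illustrative assumptions only.
# python run_mlm_tpu.py \
#     --train_dataset gs://my-bucket/train/ --eval_dataset gs://my-bucket/eval/ \
#     --tokenizer unigram-tokenizer-wikitext --pretrained_model_config roberta-base \
#     --output_dir ./mlm-model --bfloat16 --tpu_name local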
| 63 | 0 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest( TestCase ):
"""simple docstring"""
def setUp( self ) -> None:
'''simple docstring'''
self.tmpdirname = tempfile.mkdtemp()
self.retrieval_vector_size = 8
# DPR tok
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
dpr_tokenizer_path = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(dpr_tokenizer_path , exist_ok=True )
self.vocab_file = os.path.join(dpr_tokenizer_path , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
bart_tokenizer_path = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(bart_tokenizer_path , exist_ok=True )
self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["vocab_file"] )
self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(vocab_tokens ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(merges ) )
def get_dpr_tokenizer( self ) -> DPRQuestionEncoderTokenizer:
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def get_bart_tokenizer( self ) -> BartTokenizer:
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def tearDown( self ) -> None:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def test_save_load_pretrained_with_saved_config( self ) -> None:
'''simple docstring'''
save_dir = os.path.join(self.tmpdirname , "rag_tokenizer" )
rag_config = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(save_dir )
rag_tokenizer.save_pretrained(save_dir )
new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir , config=rag_config )
self.assertIsInstance(new_rag_tokenizer.question_encoder , DPRQuestionEncoderTokenizerFast )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , BartTokenizerFast )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def test_pretrained_token_nq_tokenizer( self ) -> None:
'''simple docstring'''
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq" )
input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
input_dict = tokenizer(input_strings )
self.assertIsNotNone(input_dict )
@slow
def test_pretrained_sequence_nq_tokenizer( self ) -> None:
'''simple docstring'''
tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq" )
input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
input_dict = tokenizer(input_strings )
self.assertIsNotNone(input_dict )
| 68 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin ):
"""simple docstring"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'LayoutLMv3ImageProcessor'
tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , FutureWarning , )
feature_extractor = kwargs.pop("feature_extractor" )
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(image_processor , tokenizer )
def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes : Union[List[List[int]], List[List[List[int]]]] = None , word_labels : Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
# first, apply the image processor
features = self.image_processor(images=images , return_tensors=return_tensors )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(text , str ):
text = [text] # add batch dimension (as the image processor always adds a batch dimension)
text_pair = features["words"]
encoded_inputs = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
# add pixel values
images = features.pop("pixel_values" )
if return_overflowing_tokens is True:
images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
encoded_inputs["pixel_values"] = images
return encoded_inputs
def get_overflowing_images( self , images , overflow_to_sample_mapping ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
images_with_overflow = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}' )
return images_with_overflow
def batch_decode( self , *args , **kwargs ):
return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs ):
return self.tokenizer.decode(*args , **kwargs )
@property
def model_input_names( self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def feature_extractor_class( self ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
return self.image_processor_class
@property
def feature_extractor( self ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
return self.image_processor
| 63 | 0 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase = '''true'''
def get_basic_setup(accelerator , num_samples=82 , batch_size=16 ):
set_seed(42 )
model = RegressionModel()
ddp_model = deepcopy(model )
dset = RegressionDataset(length=num_samples )
dataloader = DataLoader(dset , batch_size=batch_size )
model.to(accelerator.device )
ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
return model, ddp_model, dataloader
def get_dataloader(accelerator , use_longest=False ):
tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
dataset = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(examples ):
outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
return outputs
with accelerator.main_process_first():
tokenized_datasets = dataset.map(
tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(examples ):
if use_longest:
return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=16 )
def get_mrpc_setup(dispatch_batches , split_batches ):
accelerator = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
dataloader = get_dataloader(accelerator , not dispatch_batches )
model = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=True )
ddp_model , ddp_dataloader = accelerator.prepare(model , dataloader )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model , dataloader , accelerator ):
logits_and_targets = []
for batch in dataloader:
input , target = batch.values()
with torch.no_grad():
logit = model(input )
logit , target = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
logits , targs = [], []
for logit, targ in logits_and_targets:
logits.append(logit )
targs.append(targ )
logits , targs = torch.cat(logits ), torch.cat(targs )
return logits, targs
def test_torch_metrics(accelerator , num_samples=82 , dispatch_batches=False , split_batches=False , batch_size=16 ):
model , ddp_model , dataloader = get_basic_setup(accelerator , num_samples , batch_size )
logits , targs = generate_predictions(ddp_model , dataloader , accelerator )
assert (
len(logits ) == num_samples
), f'Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits )}'
def test_mrpc(dispatch_batches = False , split_batches = False ):
metric = evaluate.load('glue' , 'mrpc' )
setup , accelerator = get_mrpc_setup(dispatch_batches , split_batches )
# First do baseline
model , dataloader , device = setup['no']
model.to(device )
model.eval()
for batch in dataloader:
batch.to(device )
with torch.inference_mode():
outputs = model(**batch )
preds = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=preds , references=batch['labels'] )
baseline = metric.compute()
# Then do distributed
model , dataloader , device = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
outputs = model(**batch )
preds = outputs.logits.argmax(dim=-1 )
references = batch['labels']
preds , references = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=preds , references=references )
distributed = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def main():
accelerator = Accelerator(split_batches=False , dispatch_batches=False )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' )
test_mrpc(dispatch_batches , split_batches )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
accelerator = Accelerator(split_batches=split_batches , dispatch_batches=dispatch_batches )
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' )
test_torch_metrics(accelerator , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
accelerator = Accelerator()
test_torch_metrics(accelerator , 512 )
accelerator.state._reset_state()
def _mp_fn(index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 69 |
'''simple docstring'''
from ....utils import logging
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class MMBTConfig:
"""simple docstring"""
def __init__( self , config , num_labels=None , modal_hidden_size=20_48 ):
self.__dict__ = config.__dict__
self.modal_hidden_size = modal_hidden_size
if num_labels:
self.num_labels = num_labels
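# Editor's sketch, not part of the original file: the wrapper copies an existing config's
# attributes and bolts on a modal hidden size. `base_config` is an illustrative assumption.
# base_config = AutoConfig.from_pretrained("bert-base-uncased")
# mmbt_config = MMBTConfig(base_config, num_labels=2, modal_hidden_size=2048)
# assert mmbt_config.num_labels == 2 and mmbt_config.modal_hidden_size == 2048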
| 63 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device('''cpu''')
def prepare_img( ):
"""simple docstring"""
url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
im = Image.open(requests.get(url , stream=True ).raw )
return im
def get_expected_output( swiftformer_name ):
"""simple docstring"""
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] )
def rename_key( dct , old , new ):
"""simple docstring"""
val = dct.pop(old )
dct[new] = val
def create_rename_keys( state_dict ):
"""simple docstring"""
rename_keys = []
for k in state_dict.keys():
k_new = k
if ".pwconv" in k:
k_new = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
if ".dwconv" in k:
k_new = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
if ".Proj." in k:
k_new = k_new.replace(""".Proj.""" , """.proj.""" )
if "patch_embed" in k_new:
k_new = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
if "network" in k_new:
ls = k_new.split(""".""" )
if ls[2].isdigit():
k_new = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
else:
k_new = k_new.replace("""network""" , """swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint( swiftformer_name , pytorch_dump_folder_path , original_ckpt ):
"""simple docstring"""
config = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
config.num_labels = 10_00
repo_id = """huggingface/label-files"""
filename = """imagenet-1k-id2label.json"""
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
config.depths = [3, 3, 6, 4]
config.embed_dims = [48, 56, 1_12, 2_20]
elif swiftformer_name == "swiftformer_s":
config.depths = [3, 3, 9, 6]
config.embed_dims = [48, 64, 1_68, 2_24]
elif swiftformer_name == "swiftformer_l1":
config.depths = [4, 3, 10, 5]
config.embed_dims = [48, 96, 1_92, 3_84]
elif swiftformer_name == "swiftformer_l3":
config.depths = [4, 4, 12, 6]
config.embed_dims = [64, 1_28, 3_20, 5_12]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
checkpoint = torch.hub.load_state_dict_from_url(original_ckpt , map_location="""cpu""" , check_hash=True )
else:
checkpoint = torch.load(original_ckpt , map_location="""cpu""" )
state_dict = checkpoint
rename_keys = create_rename_keys(state_dict )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(state_dict , rename_key_src , rename_key_dest )
# load HuggingFace model
hf_model = SwiftFormerForImageClassification(config ).eval()
hf_model.load_state_dict(state_dict )
# prepare test inputs
image = prepare_img()
processor = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
inputs = processor(images=image , return_tensors="""pt""" )
# compare outputs from both models
timm_logits = get_expected_output(swiftformer_name )
hf_logits = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 10_00] )
assert torch.allclose(hf_logits[0, 0:5] , timm_logits , atol=1e-3 )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 70 |
'''simple docstring'''
def solution( n : int = 100 ) -> int:
sum_of_squares = 0
sum_of_ints = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
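# Editor's sketch, not part of the original file: the same value via the closed forms
# sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6, useful as an O(1) cross-check.
def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
assert solution_closed_form(10) == solution(10) == 2640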
| 63 | 0 |
def hexagonal_numbers( length : int ) -> list[int]:
if length <= 0 or not isinstance(length , int ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
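# Editor's sketch, not part of the original file: hexagonal numbers satisfy the recurrence
# h(n) = h(n-1) + 4n - 3, which gives a quick independent check of the closed form above.
nums = hexagonal_numbers(length=8)
for n in range(1, 8):
    assert nums[n] == nums[n - 1] + 4 * n - 3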
| 71 |
'''simple docstring'''
def is_palindrome( num : int ) -> bool:
if num < 0:
return False
num_copy = num
rev_num = 0
while num > 0:
rev_num = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
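# Editor's sketch, not part of the original file: the same check via string reversal,
# traded against the arithmetic version above (shorter, but allocates strings).
def is_palindrome_str(num: int) -> bool:
    return num >= 0 and str(num) == str(num)[::-1]
assert is_palindrome_str(12321) and not is_palindrome_str(123)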
| 63 | 0 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = '''facebook/wmt19-en-de'''
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
mname_tiny = '''tiny-wmt19-en-de'''
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
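# Editor's sketch, not part of the original script: reloading the tiny checkpoint to verify
# the saved files are self-consistent; uses the same mname_tiny as above.
# reloaded = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
# reloaded_tok = FSMTTokenizer.from_pretrained(mname_tiny)
# assert reloaded.config.d_model == 4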
| 72 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowerCAmelCase_ : int = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[int] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : List[str] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 63 | 0 |
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force( magnitude : float , angle : float , radian_mode : bool = False ) -> list[float]:
if radian_mode:
return [magnitude * cos(angle ), magnitude * sin(angle )]
return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium( forces : NDArray[float64] , location : NDArray[float64] , eps : float = 10**-1 ) -> bool:
moments : NDArray[float64] = cross(location , forces )
sum_moments : float = sum(moments )
return abs(sum_moments ) < eps
if __name__ == "__main__":
# Test to check if it works
forces = array(
[
polar_force(7_18.4, 180 - 30),
polar_force(8_79.54, 45),
polar_force(100, -90),
]
)
location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
forces = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
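# Editor's sketch, not part of the original file: one more worked case -- a see-saw with
# 10 N down at x = -2 m balanced against 5 N down at x = +4 m about the pivot.
forces_seesaw = array([[0, -10], [0, -5]])
location_seesaw = array([[-2, 0], [4, 0]])
assert in_static_equilibrium(forces_seesaw, location_seesaw)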
| 73 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase_ : Any = get_tests_dir('fixtures')
lowerCAmelCase_ : Union[str, Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowerCAmelCase_ : Dict = get_tests_dir('fixtures/dummy-config.json')
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] ):
_a = 0
def UpperCamelCase__ ( self : str ):
_a = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Tuple ):
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
_a = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
_a = AutoFeatureExtractor.from_pretrained(__a ).to_dict()
config_dict.pop("feature_extractor_type" )
_a = WavaVecaFeatureExtractor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
_a = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Tuple ):
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Union[str, Any] ):
with self.assertRaisesRegex(
__a , "bert-base is not a local folder and is not a valid model identifier" ):
_a = AutoFeatureExtractor.from_pretrained("bert-base" )
def UpperCamelCase__ ( self : Optional[Any] ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_a = AutoFeatureExtractor.from_pretrained(__a , revision="aaaaaa" )
def UpperCamelCase__ ( self : List[Any] ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
_a = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def UpperCamelCase__ ( self : List[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def UpperCamelCase__ ( self : Any ):
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoFeatureExtractor.register(__a , __a )
# Now that the config is registered, it can be used as any other config with the auto-API
_a = CustomFeatureExtractor.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self : Tuple ):
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =True
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# If remote code is not set, the default is to use local
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(__a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 63 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape( tensor_list ):
shapes = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class StableDiffusionLatentUpscalePipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
pipeline_class = StableDiffusionLatentUpscalePipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''height''',
'''width''',
'''cross_attention_kwargs''',
'''negative_prompt_embeds''',
'''prompt_embeds''',
}
required_optional_params = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
image_params = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
image_latents_params = frozenset([] )
test_cpu_offload = True
@property
def dummy_image( self ):
batch_size = 1
num_channels = 4
sizes = (16, 16)
image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
return image
def get_dummy_components( self ):
torch.manual_seed(0 )
model = UNet2DConditionModel(
act_fn='gelu' , attention_head_dim=8 , norm_num_groups=None , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
) , in_channels=8 , mid_block_type=None , only_cross_attention=False , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
vae = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
scheduler = EulerDiscreteScheduler(prediction_type='sample' )
text_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='quick_gelu' , projection_dim=512 , )
text_encoder = CLIPTextModel(text_config )
tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
components = {
'unet': model.eval(),
'vae': vae.eval(),
'scheduler': scheduler,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
if str(device ).startswith('mps' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def test_inference( self ):
device = 'cpu'
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe.to(device )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
image = pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
expected_slice = np.array(
[0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] )
max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(max_diff , 1e-3 )
def test_attention_slicing_forward_pass( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def test_cpu_offload_forward_pass( self ):
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def test_dict_tuple_outputs_equivalent( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def test_inference_batch_single_identical( self ):
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def test_pt_np_pil_outputs_equivalent( self ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def test_save_load_local( self ):
super().test_save_load_local(expected_max_difference=3e-3 )
def test_save_load_optional_components( self ):
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def test_karras_schedulers_shape( self ):
skip_schedulers = [
'DDIMScheduler',
'DDPMScheduler',
'PNDMScheduler',
'HeunDiscreteScheduler',
'EulerAncestralDiscreteScheduler',
'KDPM2DiscreteScheduler',
'KDPM2AncestralDiscreteScheduler',
'DPMSolverSDEScheduler',
]
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=True )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(torch_device )
inputs['num_inference_steps'] = 2
outputs = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
scheduler_cls = getattr(diffusers , scheduler_enum.name )
pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config )
output = pipe(**inputs )[0]
outputs.append(output )
assert check_same_shape(outputs )
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests( unittest.TestCase ):
'''simple docstring'''
def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_latent_upscaler_fp16( self ):
generator = torch.manual_seed(33 )
pipe = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.float16 )
pipe.to('cuda' )
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.float16 )
upscaler.to('cuda' )
prompt = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
low_res_latents = pipe(prompt , generator=generator , output_type='latent' ).images
image = upscaler(
prompt=prompt , image=low_res_latents , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type='np' , ).images[0]
expected_image = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def test_latent_upscaler_fp16_image( self ):
generator = torch.manual_seed(33 )
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.float16 )
upscaler.to('cuda' )
prompt = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
image = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
image = upscaler(
prompt=prompt , image=image , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type='np' , ).images[0]
expected_image = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
assert np.abs((expected_image - image).max() ) < 5e-2
| 74 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : int = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig ):
"""simple docstring"""
model_type = 'gpt_bigcode'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , vocab_size=5_02_57 , n_positions=10_24 , n_embd=7_68 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=5_02_56 , eos_token_id=5_02_56 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ):
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.scale_attn_weights = scale_attn_weights
self.use_cache = use_cache
self.attention_softmax_in_fp32 = attention_softmax_in_fp32
self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
self.multi_query = multi_query
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
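# Editor's sketch, not part of the original file: the attribute_map lets canonical names
# resolve to GPT-2-style fields, so hidden_size transparently reads n_embd.
# config = GPTBigCodeConfig(n_embd=256, n_layer=4)
# assert config.hidden_size == 256 and config.num_hidden_layers == 4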
| 63 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
a_ : Dict = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = ["""DPTFeatureExtractor"""]
a_ : int = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
a_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 75 |
'''simple docstring'''
def perfect_cube( n : int ) -> bool:
val = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
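# Editor's sketch, not part of the original file: the float cube root above can misreport
# large cubes because of rounding drift; an integer-rounding variant avoids that.
def perfect_cube_exact(n: int) -> bool:
    root = round(abs(n) ** (1 / 3))
    return root**3 == abs(n)
assert perfect_cube_exact(27) and not perfect_cube_exact(4)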
| 63 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=224 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> None:
"""simple docstring"""
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
image_processing_class = ViTImageProcessor if is_vision_available() else None
def setUp( self ) -> None:
"""simple docstring"""
self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
def image_processor_dict( self ):
"""simple docstring"""
return self.image_proc_tester.prepare_image_processor_dict()
def test_image_proc_properties( self ):
"""simple docstring"""
image_processor = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(image_processor , "image_mean" ) )
self.assertTrue(hasattr(image_processor , "image_std" ) )
self.assertTrue(hasattr(image_processor , "do_normalize" ) )
self.assertTrue(hasattr(image_processor , "do_resize" ) )
self.assertTrue(hasattr(image_processor , "size" ) )
def test_batch_feature( self ):
"""simple docstring"""
pass
def test_call_pil( self ):
"""simple docstring"""
image_processor = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input
encoded_images = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
encoded_images = image_processor(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def test_call_numpy( self ):
"""simple docstring"""
image_processor = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , numpify=True )
for image in image_inputs:
self.assertIsInstance(image , np.ndarray )
# Test not batched input
encoded_images = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
encoded_images = image_processor(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def test_call_pytorch( self ):
"""simple docstring"""
image_processor = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , torchify=True )
for image in image_inputs:
self.assertIsInstance(image , torch.Tensor )
# Test not batched input
encoded_images = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
encoded_images = image_processor(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 76 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[int] = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class DetaConfig(PretrainedConfig ):
"""simple docstring"""
model_type = 'deta'
attribute_map = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , backbone_config=None , num_queries=9_00 , max_position_embeddings=20_48 , encoder_layers=6 , encoder_ffn_dim=20_48 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=10_24 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_56 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , num_feature_levels=5 , encoder_n_points=4 , decoder_n_points=4 , two_stage=True , two_stage_num_proposals=3_00 , with_box_refine=True , assign_first_stage=True , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , **kwargs , ):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(backbone_config , dict ):
backbone_model_type = backbone_config.pop("model_type" )
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config )
self.backbone_config = backbone_config
self.num_queries = num_queries
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.init_xavier_std = init_xavier_std
self.encoder_layerdrop = encoder_layerdrop
self.auxiliary_loss = auxiliary_loss
self.position_embedding_type = position_embedding_type
# deformable attributes
self.num_feature_levels = num_feature_levels
self.encoder_n_points = encoder_n_points
self.decoder_n_points = decoder_n_points
self.two_stage = two_stage
self.two_stage_num_proposals = two_stage_num_proposals
self.with_box_refine = with_box_refine
self.assign_first_stage = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
self.class_cost = class_cost
self.bbox_cost = bbox_cost
self.giou_cost = giou_cost
# Loss coefficients
self.mask_loss_coefficient = mask_loss_coefficient
self.dice_loss_coefficient = dice_loss_coefficient
self.bbox_loss_coefficient = bbox_loss_coefficient
self.giou_loss_coefficient = giou_loss_coefficient
self.eos_coefficient = eos_coefficient
self.focal_alpha = focal_alpha
super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self : Dict ):
return self.d_model
def UpperCamelCase__ ( self : List[str] ):
_a = copy.deepcopy(self.__dict__ )
_a = self.backbone_config.to_dict()
_a = self.__class__.model_type
return output
| 63 | 0 |
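A minimal sketch of instantiating the config above (assuming it is exported as `DetaConfig`, the upstream class registered for model_type 'deta'):

from transformers import DetaConfig

config = DetaConfig()  # falls back to the default ResNet backbone branch above
print(config.backbone_config.model_type)  # "resnet"
print(config.hidden_size)  # attribute_map alias for d_model (256 by default)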
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config( checkpoint_url ):
    '''simple docstring'''
    config = SwinaSRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.patch_size = 7
        config.img_range = 2_5_5.0
        config.upsampler = ''
    return config
def rename_key( name , config ):
    '''simple docstring'''
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
    if "layers" in name:
        name = name.replace('layers' , 'encoder.stages' )
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks' , 'layers' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "q_bias" in name:
        name = name.replace('q_bias' , 'query.bias' )
    if "k_bias" in name:
        name = name.replace('k_bias' , 'key.bias' )
    if "v_bias" in name:
        name = name.replace('v_bias' , 'value.bias' )
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'patch_embed.projection' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "conv_first" in name:
        name = name.replace('conv_first' , 'first_convolution' )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last' , 'final_convolution' )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
            if "upsample.0" in name:
                name = name.replace('upsample.0' , 'upsample.convolution_0' )
            if "upsample.2" in name:
                name = name.replace('upsample.2' , 'upsample.convolution_1' )
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
            name = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
        else:
            pass
    else:
        name = 'swin2sr.' + name
    return name
def convert_state_dict( orig_state_dict , config ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('.' )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"""
                ] = val[:dim, :]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"""
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"""
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"""
                ] = val[:dim]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"""
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"""swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"""
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , config )] = val
    return orig_state_dict
def convert_swinasr_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
    '''simple docstring'''
    config = get_config(checkpoint_url )
    model = SwinaSRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if len(missing_keys ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"""Unexpected key {key} in state_dict""" )
    # verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _lowerCAmelCase , atol=1E-3 )
print('Looks ok!' )
    url_to_name = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
    model_name = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCAmelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
model.push_to_hub(f"""caidas/{model_name}""" )
processor.push_to_hub(f"""caidas/{model_name}""" )
if __name__ == "__main__":
_UpperCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
_UpperCamelCase : List[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 77 |
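A hypothetical invocation of the converter above (the script filename and dump path are assumptions for illustration; the URL is the argparse default):

# python convert_swin2sr_checkpoint.py \
#     --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#     --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64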
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file( tokenizer_name , data_dir , max_source_length=1024 , max_target_length=1024 , consider_target=False , **kwargs ):
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="train" , **kwargs )
    pad = tok.pad_token_id
    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["labels"].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="val" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 63 | 0 |
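Because the entry point is wrapped in fire.Fire, the function arguments double as a CLI; a hypothetical call (tokenizer name and data dir are assumptions for illustration):

# python save_len_file.py facebook/bart-large-cnn ./cnn_dm --max_source_length 1024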
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
snake_case_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class MaskGenerationPipeline( ChunkPipeline ):
"""simple docstring"""
def __init__( self :List[Any] , **lowercase_ :int ) -> Union[str, Any]:
super().__init__(**lowercase_ )
requires_backends(self , 'vision' )
requires_backends(self , 'torch' )
if self.framework != "pt":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING )
    def _sanitize_parameters( self , **kwargs ):
UpperCAmelCase = {}
UpperCAmelCase = {}
UpperCAmelCase = {}
# preprocess args
if "points_per_batch" in kwargs:
UpperCAmelCase = kwargs['points_per_batch']
if "points_per_crop" in kwargs:
UpperCAmelCase = kwargs['points_per_crop']
if "crops_n_layers" in kwargs:
UpperCAmelCase = kwargs['crops_n_layers']
if "crop_overlap_ratio" in kwargs:
UpperCAmelCase = kwargs['crop_overlap_ratio']
if "crop_n_points_downscale_factor" in kwargs:
UpperCAmelCase = kwargs['crop_n_points_downscale_factor']
# postprocess args
if "pred_iou_thresh" in kwargs:
UpperCAmelCase = kwargs['pred_iou_thresh']
if "stability_score_offset" in kwargs:
UpperCAmelCase = kwargs['stability_score_offset']
if "mask_threshold" in kwargs:
UpperCAmelCase = kwargs['mask_threshold']
if "stability_score_thresh" in kwargs:
UpperCAmelCase = kwargs['stability_score_thresh']
if "crops_nms_thresh" in kwargs:
UpperCAmelCase = kwargs['crops_nms_thresh']
if "output_rle_mask" in kwargs:
UpperCAmelCase = kwargs['output_rle_mask']
if "output_bboxes_mask" in kwargs:
UpperCAmelCase = kwargs['output_bboxes_mask']
return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__( self , image , *args , num_workers=None , batch_size=None , **kwargs ):
        return super().__call__(image , *args , num_workers=num_workers , batch_size=batch_size , **kwargs )
    def preprocess( self , image , points_per_batch=64 , crops_n_layers: int = 0 , crop_overlap_ratio: float = 5_12 / 15_00 , points_per_crop: Optional[int] = 32 , crop_n_points_downscale_factor: Optional[int] = 1 , ):
UpperCAmelCase = load_image(lowercase_ )
UpperCAmelCase = self.image_processor.size['longest_edge']
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.image_processor.generate_crop_boxes(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase = self.image_processor(images=lowercase_ , return_tensors='pt' )
with self.device_placement():
if self.framework == "pt":
UpperCAmelCase = self.get_inference_context()
with inference_context():
UpperCAmelCase = self._ensure_tensor_on_device(lowercase_ , device=self.device )
UpperCAmelCase = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
UpperCAmelCase = image_embeddings
UpperCAmelCase = grid_points.shape[1]
UpperCAmelCase = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
                'Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. '
'To return all points at once, set points_per_batch to None' )
for i in range(0 , lowercase_ , lowercase_ ):
UpperCAmelCase = grid_points[:, i : i + points_per_batch, :, :]
UpperCAmelCase = input_labels[:, i : i + points_per_batch]
UpperCAmelCase = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
    def _forward( self , model_inputs , pred_iou_thresh=0.88 , stability_score_thresh=0.95 , mask_threshold=0 , stability_score_offset=1 , ):
UpperCAmelCase = model_inputs.pop('input_boxes' )
UpperCAmelCase = model_inputs.pop('is_last' )
UpperCAmelCase = model_inputs.pop('original_sizes' ).tolist()
UpperCAmelCase = model_inputs.pop('reshaped_input_sizes' ).tolist()
UpperCAmelCase = self.model(**lowercase_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
UpperCAmelCase = model_outputs['pred_masks']
UpperCAmelCase = self.image_processor.post_process_masks(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , binarize=lowercase_ )
UpperCAmelCase = model_outputs['iou_scores']
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
    def postprocess( self , model_outputs , output_rle_mask=False , output_bboxes_mask=False , crops_nms_thresh=0.7 , ):
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = []
for model_output in model_outputs:
all_scores.append(model_output.pop('iou_scores' ) )
all_masks.extend(model_output.pop('masks' ) )
all_boxes.append(model_output.pop('boxes' ) )
UpperCAmelCase = torch.cat(lowercase_ )
UpperCAmelCase = torch.cat(lowercase_ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.image_processor.post_process_for_mask_generation(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase = defaultdict(lowercase_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowercase_ )
UpperCAmelCase = {}
if output_rle_mask:
UpperCAmelCase = rle_mask
if output_bboxes_mask:
UpperCAmelCase = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 78 |
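A minimal sketch of driving the chunk pipeline above through the high-level factory (the checkpoint and image URL are assumptions for illustration, not taken from this file):

from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-base")
outputs = generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=64
)
# One binary mask per detected segment, plus its IoU score.
print(len(outputs["masks"]), outputs["scores"].shape)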
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : str ):
_a = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : List[str] ):
_a = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : List[str] ):
_a = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : List[str] ):
_a = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : str ):
_a = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Any ):
_a = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Any ):
# pass variant but use the non-variant filenames
_a = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a = "fp16"
self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Dict ):
_a = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : List[str] ):
# pass variant but use the non-variant filenames
_a = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Optional[int] ):
_a = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_a = "fp16"
self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
| 63 | 0 |
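For context, the helper exercised by these tests takes a flat list of repo filenames and checks that the torch .bin weights have safetensors counterparts; a small sketch:

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible

filenames = [
    "unet/diffusion_pytorch_model.bin",
    "unet/diffusion_pytorch_model.safetensors",
]
print(is_safetensors_compatible(filenames))  # True: the .bin file has a safetensors twin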
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor( ProcessorMixin ):
    """simple docstring"""
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''AutoImageProcessor'''
    tokenizer_class = '''AutoTokenizer'''
def __init__( self : Union[str, Any] , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : str=None , **__UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
_A = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
_A = kwargs.pop("feature_extractor" )
_A = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
_A = self.image_processor
_A = False
def __call__( self : int , *__UpperCAmelCase : Dict , **__UpperCAmelCase : Dict ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase )
_A = kwargs.pop("images" , __UpperCAmelCase )
_A = kwargs.pop("text" , __UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
_A = args[0]
_A = args[1:]
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
_A = self.image_processor(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase )
if text is not None:
_A = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_A = encodings["input_ids"]
return inputs
def lowerCAmelCase ( self : List[str] , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def lowerCAmelCase ( self : Tuple , *__UpperCAmelCase : str , **__UpperCAmelCase : Dict ):
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@contextmanager
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your images inputs, or in a separate call." )
_A = True
_A = self.tokenizer
yield
_A = self.image_processor
_A = False
    def tokenajson( self , tokens , is_inner_value=False , added_vocab=None ):
'''simple docstring'''
if added_vocab is None:
_A = self.tokenizer.get_added_vocab()
_A = {}
while tokens:
_A = re.search(R"<s_(.*?)>" , __UpperCAmelCase , re.IGNORECASE )
if start_token is None:
break
_A = start_token.group(1 )
_A = re.search(Rf'''</s_{key}>''' , __UpperCAmelCase , re.IGNORECASE )
_A = start_token.group()
if end_token is None:
_A = tokens.replace(__UpperCAmelCase , "" )
else:
_A = end_token.group()
_A = re.escape(__UpperCAmelCase )
_A = re.escape(__UpperCAmelCase )
_A = re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''' , __UpperCAmelCase , re.IGNORECASE )
if content is not None:
_A = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_A = self.tokenajson(__UpperCAmelCase , is_inner_value=__UpperCAmelCase , added_vocab=__UpperCAmelCase )
if value:
if len(__UpperCAmelCase ) == 1:
_A = value[0]
_A = value
else: # leaf nodes
_A = []
for leaf in content.split(R"<sep/>" ):
_A = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_A = leaf[1:-2] # for categorical special tokens
output[key].append(__UpperCAmelCase )
if len(output[key] ) == 1:
_A = output[key][0]
_A = tokens[tokens.find(__UpperCAmelCase ) + len(__UpperCAmelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__UpperCAmelCase , added_vocab=__UpperCAmelCase )
if len(__UpperCAmelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCAmelCase , )
return self.image_processor_class
@property
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCAmelCase , )
return self.image_processor
| 79 |
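A hedged sketch of what the tokenajson parser above produces for a Donut-style tag sequence (the tag names and checkpoint are assumptions for illustration):

# seq = "<s_menu><s_name>latte</s_name><s_price>4.50</s_price></s_menu>"
# processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")  # hypothetical checkpoint
# processor.tokenajson(seq)
# -> {"menu": {"name": "latte", "price": "4.50"}}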
'''simple docstring'''
def base16_encode( data: bytes ) -> str:
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode( data: str ) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data ) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 0 |
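A round-trip check for the two helpers above (the sample bytes are arbitrary):

encoded = base16_encode(b"Hello World!")
assert encoded == "48656C6C6F20576F726C6421"
assert base16_decode(encoded) == b"Hello World!"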
'''simple docstring'''
from math import pi, sqrt
def gamma( num: float ) -> float:
    '''simple docstring'''
    if num <= 0:
        raise ValueError("math domain error" )
    if num > 171.5:
        raise OverflowError("math range error" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer" )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma() -> None:
    '''simple docstring'''
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input('Gamma of: '))
print(F"""gamma({num}) = {gamma(num)}""")
print('\nEnter 0 to exit...')
| 80 |
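Since gamma(n) equals (n - 1)! for positive integers and the half-integer chain bottoms out at gamma(0.5) = sqrt(pi), the recursion above can be spot-checked directly:

from math import isclose, pi, sqrt

assert gamma(5) == 24.0  # gamma(5) = 4! = 24
assert isclose(gamma(3.5), 2.5 * 1.5 * 0.5 * sqrt(pi))  # gamma(7/2) unrolled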
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters( model_a , model_b , did_step , iteration ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def step_model( model , input , target , accelerator , do_backward=True ):
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def get_training_setup( accelerator , sched=False ):
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1E-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model , ddp_opt , ddp_sched , dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync( lowercase ):
# Test when on a single CPU or GPU that the context manager does nothing
_a , _a , _a = get_training_setup(lowercase )
# Use a single batch
_a , _a = next(iter(lowercase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
else:
# Sync grads
step_model(lowercase , lowercase , lowercase , lowercase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowercase , lowercase , lowercase , lowercase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_a = ddp_input[torch.randperm(len(lowercase ) )]
def test_distributed_sync( lowercase ):
# Test on distributed setup that context manager behaves properly
_a , _a , _a = get_training_setup(lowercase )
# Use a single batch
_a , _a = next(iter(lowercase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
else:
# Sync grads
step_model(lowercase , lowercase , lowercase , lowercase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_a = ddp_input[torch.randperm(len(lowercase ) )]
def test_gradient_accumulation( lowercase : List[Any]=False , lowercase : Optional[int]=False ) -> Any:
_a = Accelerator(
split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_a , _a , _a = get_training_setup(lowercase )
for iteration, batch in enumerate(lowercase ):
_a , _a = batch.values()
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_a = ddp_input[torch.randperm(len(lowercase ) )]
GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler( lowercase : int=False , lowercase : int=False ) -> Dict:
_a = Accelerator(
split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_a , _a , _a , _a , _a , _a , _a = get_training_setup(lowercase , lowercase )
for iteration, batch in enumerate(lowercase ):
_a , _a = batch.values()
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowercase , lowercase , lowercase , lowercase , lowercase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
_a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase ))
if accelerator.num_processes > 1:
check_model_parameters(lowercase , lowercase , lowercase , lowercase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def test_dataloader_break( ) -> Any:
_a = Accelerator()
_a = RegressionDataset(length=80 )
_a = DataLoader(lowercase , batch_size=16 )
_a = RegressionDataset(length=96 )
_a = DataLoader(lowercase , batch_size=16 )
_a , _a = accelerator.prepare(lowercase , lowercase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowercase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase )
if iteration < len(lowercase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowercase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase )
if batch_num < len(lowercase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def main( ) -> Optional[Any]:
_a = Accelerator()
_a = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(lowercase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(lowercase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation(lowercase , lowercase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation_with_opt_and_scheduler(lowercase , lowercase )
def _mp_fn( index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 63 | 0 |
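Outside the test harness, the accumulate pattern exercised above reduces to a few lines; a minimal sketch (model, optimizer, dataloader and loss_fn are assumed to be defined by the caller):

from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
for batch in dataloader:
    with accelerator.accumulate(model):
        # Gradients are synced and stepped only every `gradient_accumulation_steps` batches.
        loss = loss_fn(model(batch["x"]), batch["y"])  # loss_fn and batch keys are assumptions
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()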
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length( fnc: Callable[[float], float] , x_start: float , x_end: float , steps: int = 1_00 , ) -> float:
    """simple docstring"""
    xa = x_start
    fxa = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next )
        length += math.hypot(xa_next - xa , fxa_next - fxa )
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return length
if __name__ == "__main__":
    def f( x ):
"""simple docstring"""
return math.sin(10 * x )
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
    i = 1_0
while i <= 1_0_0_0_0_0:
print(F'With {i} steps: {line_length(f, -1_0, 1_0, i)}')
        i *= 1_0
| 81 |
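The loop above is a polyline approximation of the arc length integral of sqrt(1 + f'(x)^2); for a straight line the polyline coincides with the curve, which gives a cheap sanity check:

import math

# y = x from 0 to 1 has exact length sqrt(2).
assert abs(line_length(lambda x: x, 0, 1, 1_000) - math.sqrt(2)) < 1e-9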
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig ):
    """simple docstring"""
    model_type ='trocr'
    keys_to_ignore_at_inference =['past_key_values']
    attribute_map ={
        'num_attention_heads': 'decoder_attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'decoder_layers',
    }
def __init__( self : Optional[int] , __a : Any=5_02_65 , __a : Optional[int]=10_24 , __a : List[Any]=12 , __a : str=16 , __a : int=40_96 , __a : Optional[Any]="gelu" , __a : Union[str, Any]=5_12 , __a : Dict=0.1 , __a : List[str]=0.0 , __a : Union[str, Any]=0.0 , __a : Any=2 , __a : Union[str, Any]=0.02 , __a : Any=0.0 , __a : List[str]=True , __a : Optional[Any]=False , __a : Union[str, Any]=True , __a : Optional[Any]=True , __a : Any=1 , __a : List[Any]=0 , __a : Any=2 , **__a : Optional[Any] , ):
_a = vocab_size
_a = d_model
_a = decoder_layers
_a = decoder_attention_heads
_a = decoder_ffn_dim
_a = activation_function
_a = max_position_embeddings
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = init_std
_a = decoder_layerdrop
_a = use_cache
_a = scale_embedding
_a = use_learned_position_embeddings
_a = layernorm_embedding
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
| 63 | 0 |
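A minimal sketch of instantiating the config above (assuming it is exported as `TrOCRConfig`, the upstream class registered for model_type 'trocr'):

from transformers import TrOCRConfig

config = TrOCRConfig()
print(config.num_attention_heads)  # attribute_map alias for decoder_attention_heads (16)
print(config.hidden_size)  # alias for d_model (1024)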
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch """
            """helper utility that will spawn up """
            """multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""" , type=int , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""" , type=str , help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )
    # rest from the training program
    parser.add_argument("""training_script_args""" , nargs=REMAINDER )
    return parser.parse_args()
def main():
    """simple docstring"""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 82 |
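A hypothetical invocation of the launcher above (the training script and its flags are assumptions for illustration):

# python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train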
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(R'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(R'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping( fname , overwrite: bool = False ):
    with open(fname , "r" , encoding="utf-8" ) as f:
        content = f.read()
    lines = content.split("\n" )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(r"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "(" ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")" ):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                    line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks , key=lambda block : _re_identifier.search(block ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname , "w" , encoding="utf-8" ) as f:
            f.write("\n".join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings( overwrite: bool = False ):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE , f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith(".py" )]
    diffs = [sort_auto_mapping(fname , overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames , diffs ) if d]
        raise ValueError(
            F'The following files have auto mappings that need sorting: {", ".join(failures )}. Run `make style` to fix'
            " this." )
if __name__ == "__main__":
lowerCAmelCase_ : Any = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
lowerCAmelCase_ : Optional[int] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 63 | 0 |
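The sorter can also be driven programmatically; a small sketch (assumes the working directory is the repo root so the relative auto-module path resolves):

# Check-only mode raises ValueError if any mapping is unsorted; overwrite=True fixes files in place.
sort_all_auto_mappings(overwrite=False)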
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int
class AdjacencyList:
    def __init__( self , size: int ):
        '''simple docstring'''
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self , vertex: int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
    def size( self ):
'''simple docstring'''
return self._size
    def add_edge( self , from_vertex: int , to_vertex: int , weight: int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )
    def get_shortest_path( self , start_vertex: int , finish_vertex: int ) -> int:
        '''simple docstring'''
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83 |
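A small usage sketch of the 0-1 BFS above: weight-0 edges are expanded from the front of the deque, so shortest paths over {0, 1} weights come out without a priority queue:

g = AdjacencyList(3)
g.add_edge(0, 1, 0)
g.add_edge(1, 2, 1)
g.add_edge(0, 2, 1)
assert g.get_shortest_path(0, 2) == 1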
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ : int = logging.get_logger(__name__)
lowerCAmelCase_ : Tuple = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='big_bird'
def __init__( self : Optional[int] , __a : Dict=5_03_58 , __a : str=7_68 , __a : List[Any]=12 , __a : List[str]=12 , __a : Union[str, Any]=30_72 , __a : str="gelu_new" , __a : Dict=0.1 , __a : Union[str, Any]=0.1 , __a : Any=40_96 , __a : int=2 , __a : Tuple=0.02 , __a : List[Any]=1e-1_2 , __a : int=True , __a : List[str]=0 , __a : Tuple=1 , __a : Optional[Any]=2 , __a : Tuple=66 , __a : str="block_sparse" , __a : Tuple=True , __a : Optional[int]=False , __a : str=64 , __a : Tuple=3 , __a : Any=None , **__a : Dict , ):
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , sep_token_id=__a , **__a , )
_a = vocab_size
_a = max_position_embeddings
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = initializer_range
_a = type_vocab_size
_a = layer_norm_eps
_a = use_cache
_a = rescale_embeddings
_a = attention_type
_a = use_bias
_a = block_size
_a = num_random_blocks
_a = classifier_dropout
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
@property
def UpperCamelCase__ ( self : Optional[int] ):
if self.task == "multiple-choice":
_a = {0: "batch", 1: "choice", 2: "sequence"}
else:
_a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 63 | 0 |
"""simple docstring"""
from __future__ import annotations
def all_construct( target: str , word_bank: list[str] | None = None ) -> list[list[str]]:
    '''simple docstring'''
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target ) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size ):
        table.append([] )
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word )] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
| 84 |
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin , ConfigMixin ):
"""simple docstring"""
@register_to_config
    def __init__( self , * , clip_extra_context_tokens: int = 4 , clip_embeddings_dim: int = 7_68 , time_embed_dim: int , cross_attention_dim: int , ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim ) )
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim , time_embed_dim )
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim , time_embed_dim )
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim , cross_attention_dim )
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim )
    def forward( self , * , image_embeddings , prompt_embeds , text_encoder_hidden_states , do_classifier_free_guidance ):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
            image_embeddings_batch_size , -1 )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds )
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings )
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings )
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size , -1 , self.clip_extra_context_tokens )
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0 , 2 , 1 )
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states )
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states )
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
        return text_encoder_hidden_states, additive_clip_time_embeddings
| 63 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
class _snake_case ( unittest.TestCase ):
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return 12
@property
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
return 12
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(a__ )
@property
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
        height = 12
        width = 12
        model_kwargs = {
"attention_bias": True,
"cross_attention_dim": 32,
"attention_head_dim": height * width,
"num_attention_heads": 1,
"num_vector_embeds": self.num_embed,
"num_embeds_ada_norm": self.num_embeds_ada_norm,
"norm_num_groups": 32,
"sample_size": width,
"activation_fn": "geglu-approximate",
}
        model = Transformer2DModel(**model_kwargs )
return model
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = "cpu"
snake_case_ = self.dummy_vqvae
snake_case_ = self.dummy_text_encoder
snake_case_ = self.dummy_tokenizer
snake_case_ = self.dummy_transformer
snake_case_ = VQDiffusionScheduler(self.num_embed )
snake_case_ = LearnedClassifierFreeSamplingEmbeddings(learnable=a__ )
snake_case_ = VQDiffusionPipeline(
vqvae=a__ , text_encoder=a__ , tokenizer=a__ , transformer=a__ , scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , )
snake_case_ = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
snake_case_ = "teddy bear playing in the pool"
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type="np" )
snake_case_ = output.images
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipe(
[prompt] , generator=a__ , output_type="np" , return_dict=a__ , num_inference_steps=2 )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case_ = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = "cpu"
snake_case_ = self.dummy_vqvae
snake_case_ = self.dummy_text_encoder
snake_case_ = self.dummy_tokenizer
snake_case_ = self.dummy_transformer
snake_case_ = VQDiffusionScheduler(self.num_embed )
snake_case_ = LearnedClassifierFreeSamplingEmbeddings(
learnable=a__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case_ = VQDiffusionPipeline(
vqvae=a__ , text_encoder=a__ , tokenizer=a__ , transformer=a__ , scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , )
snake_case_ = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
snake_case_ = "teddy bear playing in the pool"
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type="np" )
snake_case_ = output.images
snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
snake_case_ = pipe(
[prompt] , generator=a__ , output_type="np" , return_dict=a__ , num_inference_steps=2 )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case_ = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        output = pipeline(
            "teddy bear playing in the pool" , num_images_per_prompt=1 , generator=generator , output_type="np" , )

        image = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 85 |
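The pipeline tests above rely on the slice-comparison idiom common in diffusers: only a small corner of the generated image is checked against recorded reference values. A standalone sketch of the idiom, with made-up reference numbers:

import numpy as np

image = np.zeros((1, 24, 24, 3))      # pretend pipeline output
image_slice = image[0, -3:, -3:, -1]  # 3x3 patch of the last channel
expected_slice = np.zeros(9)          # values recorded from a known-good run
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2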
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 63 | 0 |
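A hypothetical wiring of the callbacks above into a Lightning trainer; the output directory, metric, patience, and epoch count are placeholders rather than values from the file.

import pytorch_lightning as pl

checkpoint_cb = get_checkpoint_callback("outputs", metric="rouge2")      # "outputs" is a placeholder dir
early_stop_cb = get_early_stopping_callback(metric="rouge2", patience=3)
trainer = pl.Trainer(callbacks=[checkpoint_cb, early_stop_cb, Seq2SeqLoggingCallback()], max_epochs=1)
# trainer.fit(model)  # `model` would be the seq2seq LightningModule being trained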
"""simple docstring"""
def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
| 86 |
'''simple docstring'''
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index of the weight vector closest to the sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(self, weights: list[list[float]], sample: list[int], j: int, alpha: float) -> list[list[float]]:
        """Pull the winning weight vector toward the sample by a factor alpha."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
| 63 | 0 |
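One hand-computed step of the update rule used above, w_new = w + alpha * (x - w), to make the pull-toward-the-sample behaviour concrete:

w, x, alpha = 0.2, 1.0, 0.5
w_new = w + alpha * (x - w)
assert abs(w_new - 0.6) < 1e-9  # the weight moves halfway toward the input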
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 87 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['image_processor', 'tokenizer']
__a ='OwlViTImageProcessor'
__a =('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[Any] , __a : str=None , __a : List[str]=None , **__a : List[Any] ):
_a = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : Union[str, Any] , __a : Any=None , __a : List[str]=None , __a : int=None , __a : Optional[int]="max_length" , __a : List[str]="np" , **__a : Any ):
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(__a , __a ) or (isinstance(__a , __a ) and not isinstance(text[0] , __a )):
_a = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )]
elif isinstance(__a , __a ) and isinstance(text[0] , __a ):
_a = []
# Maximum number of queries across batch
_a = max([len(__a ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__a ) != max_num_queries:
_a = t + [" "] * (max_num_queries - len(__a ))
_a = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )
encodings.append(__a )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
_a = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_a = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_a = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_a = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_a = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
_a = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_a = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
_a = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
_a = BatchEncoding()
_a = input_ids
_a = attention_mask
if query_images is not None:
_a = BatchEncoding()
_a = self.image_processor(
__a , return_tensors=__a , **__a ).pixel_values
_a = query_pixel_values
if images is not None:
_a = self.image_processor(__a , return_tensors=__a , **__a )
if text is not None and images is not None:
_a = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_a = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
def UpperCamelCase__ ( self : List[str] , *__a : Union[str, Any] , **__a : int ):
return self.image_processor.post_process(*__a , **__a )
def UpperCamelCase__ ( self : Optional[int] , *__a : Optional[Any] , **__a : List[str] ):
return self.image_processor.post_process_object_detection(*__a , **__a )
def UpperCamelCase__ ( self : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ):
return self.image_processor.post_process_image_guided_detection(*__a , **__a )
def UpperCamelCase__ ( self : str , *__a : Tuple , **__a : Tuple ):
return self.tokenizer.batch_decode(*__a , **__a )
def UpperCamelCase__ ( self : List[str] , *__a : List[Any] , **__a : Optional[int] ):
return self.tokenizer.decode(*__a , **__a )
@property
def UpperCamelCase__ ( self : List[str] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self : str ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
return self.image_processor
| 63 | 0 |
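The nested-query branch of the processor above pads every sample's text queries to the batch-wide maximum before tokenizing; the padding step shown in isolation:

texts = [["a cat"], ["a dog", "a bird"]]  # per-image query lists
max_num_queries = max(len(t) for t in texts)
padded = [t + [" "] * (max_num_queries - len(t)) for t in texts]
assert padded == [["a cat", " "], ["a dog", "a bird"]]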
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_funnel_fast'] = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_funnel'] = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_funnel'] = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 |
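The file above follows the transformers lazy-import layout: _import_structure only names symbols, and the actual submodule import is deferred until first attribute access. Below is a self-contained sketch of the same idea; this LazyModule is a simplification written for illustration, not the library's _LazyModule.

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_submodule = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name):
        submodule = self._name_to_submodule.get(name)
        if submodule is None:
            raise AttributeError(name)
        module = importlib.import_module(submodule)  # the import happens only on first access
        return getattr(module, name)


lazy = LazyModule("demo", {"json": ["dumps"]})
print(lazy.dumps({"lazy": True}))  # json is imported only at this call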
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
| 63 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_swin'''] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_swin'''] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU." )
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!", )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.", )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.", )
    parser.add_argument(
        "--no_tpu", action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.", )
    parser.add_argument(
        "--tpu_name", type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local", )
    parser.add_argument(
        "--tpu_zone", type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.", )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes." )
    parser.add_argument(
        "--bfloat16", action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.", )
    parser.add_argument(
        "--train_dataset", type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)", )
    parser.add_argument(
        "--eval_dataset", type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.", )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.", )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.", )
    parser.add_argument(
        "--max_length", type=int, default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py", )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.", )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to." )
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub." )
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local." )

    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/" )[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename ).group(1 )
        sample_count = int(sample_count )
        num_samples += sample_count
    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset = dataset.shuffle(len(dataset ) )
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size )
    dataset = dataset.batch(batch_size, drop_remainder=True )
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO )
    dataset = dataset.prefetch(AUTO )
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args )
        strategy = tf.distribute.TPUStrategy(tpu )
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0" )

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
    config = AutoConfig.from_pretrained(args.pretrained_model_config )
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord" ) )
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}." )

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord" ) )
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." )

    num_train_samples = count_samples(training_records )
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"] )

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,) ),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(example, features )

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf" )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool )
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer ), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )

    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer ) )

    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )

    model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
main(args)
| 63 | 0 |
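The shard-counting helper in the script above assumes the record count is embedded in each shard's filename and recovers it with the regex -\d+-(\d+)\.tfrecord; an illustrative, hypothetical filename:

import re

shard_name = "wikitext-00001-52000.tfrecord"  # hypothetical shard produced upstream
match = re.search(r"-\d+-(\d+)\.tfrecord", shard_name)
assert match is not None and int(match.group(1)) == 52000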
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError('list index out of range.')
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points to itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError('list index out of range.')
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    """simple docstring"""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 90 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['image_processor', 'tokenizer']
__a ='LayoutLMv3ImageProcessor'
__a =('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self : Tuple , __a : int=None , __a : Union[str, Any]=None , **__a : Optional[Any] ):
_a = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : Any , __a : List[str] , __a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __a : Union[List[List[int]], List[List[List[int]]]] = None , __a : Optional[Union[List[int], List[List[int]]]] = None , __a : bool = True , __a : Union[bool, str, PaddingStrategy] = False , __a : Union[bool, str, TruncationStrategy] = None , __a : Optional[int] = None , __a : int = 0 , __a : Optional[int] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = False , __a : bool = True , __a : Optional[Union[str, TensorType]] = None , **__a : Dict , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
# first, apply the image processor
_a = self.image_processor(images=__a , return_tensors=__a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__a , __a ):
_a = [text] # add batch dimension (as the image processor always adds a batch dimension)
_a = features["words"]
_a = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
# add pixel values
_a = features.pop("pixel_values" )
if return_overflowing_tokens is True:
_a = self.get_overflowing_images(__a , encoded_inputs["overflow_to_sample_mapping"] )
_a = images
return encoded_inputs
def UpperCamelCase__ ( self : Optional[int] , __a : str , __a : List[Any] ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
_a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__a ) != len(__a ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f' {len(__a )} and {len(__a )}' )
return images_with_overflow
def UpperCamelCase__ ( self : int , *__a : str , **__a : Tuple ):
return self.tokenizer.batch_decode(*__a , **__a )
def UpperCamelCase__ ( self : str , *__a : List[Any] , **__a : List[str] ):
return self.tokenizer.decode(*__a , **__a )
@property
def UpperCamelCase__ ( self : Tuple ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def UpperCamelCase__ ( self : int ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self : List[str] ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
return self.image_processor
| 63 | 0 |
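get_overflowing_images above duplicates each image once per tokenizer chunk via overflow_to_sample_mapping; the mapping logic in isolation, with string stand-ins for pixel tensors:

images = ["page0", "page1"]
overflow_to_sample_mapping = [0, 0, 1]  # sample 0 was split into two chunks
images_with_overflow = [images[i] for i in overflow_to_sample_mapping]
assert images_with_overflow == ["page0", "page0", "page1"]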
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """simple docstring"""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]]) -> None:
    """simple docstring"""
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    """simple docstring"""
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
| 91 |
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """Holds the extra configuration of a Multimodal Bitransformer on top of an underlying text config."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 63 | 0 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("""--user""", type=str, default="""ubuntu""")
parser.add_argument("""--host""", type=str, default="""localhost""")
parser.add_argument("""--key_path""", type=str, default=None)
parser.add_argument("""--instance""", type=str, default="""V100:1""")
parser.add_argument("""--provider""", type=str, default="""cheapest""")
parser.add_argument("""--use_spot""", type=bool, default=False)
parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("""Cannot specify both BYO and on-demand cluster args""")
        cluster = rh.cluster(
name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path}
)
else:
        cluster = rh.cluster(
name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit("""/""", 1)[0]
# Set up remote environment
cluster.install_packages(["""pip:./"""]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 92 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 63 | 0 |
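A closed-form cross-check of solution() above, using sum i = n(n+1)/2 and sum i^2 = n(n+1)(2n+1)/6; 25164150 is the well-known value for n = 100.

n = 100
square_of_sum = (n * (n + 1) // 2) ** 2
sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
assert square_of_sum - sum_of_squares == 25164150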
'''simple docstring'''
import random
def _partition(data: list, pivot) -> tuple:
    """simple docstring"""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """simple docstring"""
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
| 93 |
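Example use of quick_select above: it returns the element that would sit at the given index of the sorted list, in expected linear time, and None for an out-of-range index.

data = [7, 1, 5, 3, 9]
assert quick_select(data, 0) == 1               # minimum
assert quick_select(data, 2) == 5               # median
assert quick_select(data, len(data) - 1) == 9   # maximum
assert quick_select(data, 99) is None           # out-of-range index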
'''simple docstring'''
def is_palindrome_number(num: int) -> bool:
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 0 |
import argparse
import copy
def __lowerCamelCase ( UpperCAmelCase_ : Dict ):
"""simple docstring"""
a :Union[str, Any] = {}
with open(UpperCAmelCase_ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
a :int = []
_list.append([line.split()[1], line.split()[2]] )
a :Any = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
a :List[Any] = []
_list.append([line.split()[0], line.split()[2]] )
a :str = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ):
"""simple docstring"""
with open(UpperCAmelCase_ ) as f:
a :Any = f.read(1 )
a :Dict = start_node
a :str = []
a :Dict = start_node
a :List[Any] = 0
while visiting not in first_solution:
a :Any = 1_0000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(UpperCAmelCase_ ) and k[0] not in first_solution:
a :Tuple = k[1]
a :List[str] = k[0]
first_solution.append(UpperCAmelCase_ )
a :Dict = distance_of_first_solution + int(UpperCAmelCase_ )
a :Any = best_node
first_solution.append(UpperCAmelCase_ )
a :Any = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
a :Optional[int] = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0000
)
return first_solution, distance_of_first_solution
def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : str ):
"""simple docstring"""
a :Optional[int] = []
for n in solution[1:-1]:
a :int = solution.index(UpperCAmelCase_ )
for kn in solution[1:-1]:
a :List[Any] = solution.index(UpperCAmelCase_ )
if n == kn:
continue
a :int = copy.deepcopy(UpperCAmelCase_ )
a :Optional[int] = kn
a :Optional[Any] = n
a :Any = 0
for k in _tmp[:-1]:
a :Dict = _tmp[_tmp.index(UpperCAmelCase_ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
a :Optional[int] = distance + int(i[1] )
_tmp.append(UpperCAmelCase_ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
a :List[str] = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda UpperCAmelCase_ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
a :List[str] = 1
a :List[str] = first_solution
a :Optional[Any] = []
a :Any = distance_of_first_solution
a :List[str] = solution
while count <= iters:
a :List[str] = find_neighborhood(UpperCAmelCase_ , UpperCAmelCase_ )
a :Optional[Any] = 0
a :List[str] = neighborhood[index_of_best_solution]
a :Optional[int] = len(UpperCAmelCase_ ) - 1
a :Union[str, Any] = False
while not found:
a :int = 0
while i < len(UpperCAmelCase_ ):
if best_solution[i] != solution[i]:
a :Union[str, Any] = best_solution[i]
a :Optional[Any] = solution[i]
break
a :Tuple = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
a :Union[str, Any] = True
a :List[str] = best_solution[:-1]
a :Optional[int] = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
a :Any = cost
a :List[str] = solution
else:
a :Optional[int] = index_of_best_solution + 1
a :Tuple = neighborhood[index_of_best_solution]
if len(UpperCAmelCase_ ) >= size:
tabu_list.pop(0 )
a :List[str] = count + 1
return best_solution_ever, best_cost
def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any]=None ):
"""simple docstring"""
a :Any = generate_neighbours(args.File )
a , a :str = generate_first_solution(
args.File , UpperCAmelCase_ )
a , a :List[str] = tabu_search(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , args.Iterations , args.Size , )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
snake_case : List[Any] = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 94 |
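The driver above parses a plain edge list, one "node_a node_b distance" triple per line, and generate_first_solution reads the file's first character as the start node, so single-character node names are the safe choice. A tiny hypothetical input and the matching invocation:

edges = "a b 20\na c 18\nb c 10\n"
with open("tsp_edges.txt", "w") as f:  # hypothetical filename
    f.write(edges)
# then, from the shell: python tabu_search.py -f tsp_edges.txt -i 100 -s 5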
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_neox_fast'] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox'] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 63 | 0 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __lowerCAmelCase ( UpperCamelCase__):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Any:
'''simple docstring'''
super().__init__(
lowerCAmelCase__ , split=lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ , streaming=lowerCAmelCase__ , num_proc=lowerCAmelCase__ , **lowerCAmelCase__ , )
a__ : List[str] =path_or_paths if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else {self.split: path_or_paths}
a__ : Dict =Text(
cache_dir=lowerCAmelCase__ , data_files=lowerCAmelCase__ , features=lowerCAmelCase__ , **lowerCAmelCase__ , )
def _lowercase ( self ) -> Any:
'''simple docstring'''
if self.streaming:
a__ : Tuple =self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
a__ : Union[str, Any] =None
a__ : Optional[int] =None
a__ : Union[str, Any] =None
a__ : Optional[Any] =None
self.builder.download_and_prepare(
download_config=lowerCAmelCase__ , download_mode=lowerCAmelCase__ , verification_mode=lowerCAmelCase__ , base_path=lowerCAmelCase__ , num_proc=self.num_proc , )
a__ : int =self.builder.as_dataset(
split=self.split , verification_mode=lowerCAmelCase__ , in_memory=self.keep_in_memory )
return dataset
| 95 |
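The reader above is what backs the public "text" loading script in datasets; a minimal end-to-end use, with the corpus file created on the fly just for the demo:

from datasets import load_dataset

with open("corpus.txt", "w") as f:
    f.write("hello\nworld\n")
dataset = load_dataset("text", data_files={"train": "corpus.txt"})["train"]
assert dataset[0]["text"] == "hello"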
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase_ : Any = get_tests_dir('fixtures')
lowerCAmelCase_ : Union[str, Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowerCAmelCase_ : Dict = get_tests_dir('fixtures/dummy-config.json')
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] ):
_a = 0
def UpperCamelCase__ ( self : str ):
_a = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Tuple ):
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(__a ).to_dict()
            config_dict.pop("feature_extractor_type" )
            config = Wav2Vec2FeatureExtractor(**config_dict )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
_a = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Tuple ):
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Union[str, Any] ):
with self.assertRaisesRegex(
__a , "bert-base is not a local folder and is not a valid model identifier" ):
_a = AutoFeatureExtractor.from_pretrained("bert-base" )
def UpperCamelCase__ ( self : Optional[Any] ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_a = AutoFeatureExtractor.from_pretrained(__a , revision="aaaaaa" )
def UpperCamelCase__ ( self : List[Any] ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
_a = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def UpperCamelCase__ ( self : List[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def UpperCamelCase__ ( self : Any ):
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoFeatureExtractor.register(__a , __a )
# Now that the config is registered, it can be used as any other config with the auto-API
_a = CustomFeatureExtractor.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self : Tuple ):
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =True
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# If remote code is not set, the default is to use local
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=False )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=True )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(__a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 63 | 0 |
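# Example (illustrative): a minimal sketch of the registration round trip the
# tests above exercise, assuming `transformers` is installed. `CustomConfig`
# and the local `CustomFeatureExtractor` below are hypothetical stand-ins for
# the fixtures the test suite defines elsewhere.
import tempfile

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig, Wav2Vec2FeatureExtractor


class CustomConfig(PretrainedConfig):
    model_type = "custom"


class CustomFeatureExtractor(Wav2Vec2FeatureExtractor):
    pass


def register_and_round_trip():
    # Register the pair with the auto-API, then check that save/load round-trips.
    AutoConfig.register("custom", CustomConfig)
    AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
    extractor = CustomFeatureExtractor()
    with tempfile.TemporaryDirectory() as tmp_dir:
        extractor.save_pretrained(tmp_dir)
        reloaded = AutoFeatureExtractor.from_pretrained(tmp_dir)
    assert isinstance(reloaded, CustomFeatureExtractor)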
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs) | 96 |
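# Example (illustrative): how the `attribute_map` above lets generic code read
# `hidden_size`/`num_attention_heads` even though the config stores
# `d_model`/`decoder_attention_heads`. Assumes `transformers` is installed.
from transformers import Speech2Text2Config

cfg = Speech2Text2Config(d_model=128, decoder_layers=2, decoder_attention_heads=4)
assert cfg.hidden_size == 128          # mapped to d_model
assert cfg.num_attention_heads == 4    # mapped to decoder_attention_heads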
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : int = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 63 | 0 |
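# Example (illustrative): instantiating a small variant of the config above.
# With `multi_query=True` all attention heads share a single key/value head,
# the main difference from the vanilla GPT-2 layout. Assumes a `transformers`
# release that ships GPTBigCode.
from transformers import GPTBigCodeConfig

small_cfg = GPTBigCodeConfig(n_embd=256, n_layer=4, n_head=8, multi_query=True)
print(small_cfg.hidden_size, small_cfg.num_hidden_layers)  # 256 4, via the attribute_map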
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(self, vocab_size=128100, hidden_size=1536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6144, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('''|''' )]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get('''pooler_hidden_size''' , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
        else:
            return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, tokenizer: "PreTrainedTokenizerBase" = None) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs | 97 |
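# Example (illustrative): how the ONNX config above prunes `token_type_ids`
# when `type_vocab_size == 0` (DeBERTa-v2's default). The import path is the
# one used by recent `transformers` releases; treat it as an assumption.
from transformers import DebertaV2Config
from transformers.models.deberta_v2.configuration_deberta_v2 import DebertaV2OnnxConfig

cfg = DebertaV2Config()                 # type_vocab_size defaults to 0
onnx_cfg = DebertaV2OnnxConfig(cfg, task="default")
print(list(onnx_cfg.inputs))            # ['input_ids', 'attention_mask'] - no token_type_ids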
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    val = n ** (1 / 3)
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 63 | 0 |
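# Example (illustrative): float cube roots can miss perfect cubes for large n,
# so a rounding-based integer check is a safer variant of the function above.
def perfect_cube_exact(n: int) -> bool:
    root = round(abs(n) ** (1 / 3))
    # Check the nearest integers to absorb floating-point error.
    return any((root + d) ** 3 == abs(n) for d in (-1, 0, 1))


assert perfect_cube_exact(27)
assert perfect_cube_exact(64)
assert not perfect_cube_exact(4)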
"""simple docstring"""
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
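# Example (illustrative): the same traversal written recursively; on this
# graph it reaches the same set of nodes as the iterative version above.
def depth_first_search_recursive(graph: dict, start: str, explored=None) -> set:
    if explored is None:
        explored = set()
    explored.add(start)
    for adj in graph[start]:
        if adj not in explored:
            depth_first_search_recursive(graph, adj, explored)
    return explored


assert depth_first_search_recursive(G, 'A') == depth_first_search(G, 'A')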
| 98 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
        else:
            if isinstance(backbone_config, dict ):
                backbone_model_type = backbone_config.pop("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 63 | 0 |
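# Example (illustrative): the nested backbone handling above re-hydrates a
# plain dict into a ResNetConfig. Assumes a `transformers` release that ships
# DETA.
from transformers import DetaConfig

deta_cfg = DetaConfig(backbone_config={"model_type": "resnet", "out_features": ["stage4"]})
assert deta_cfg.backbone_config.model_type == "resnet"
assert deta_cfg.to_dict()["backbone_config"]["model_type"] == "resnet"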
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available() -> bool:
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.")

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available() -> bool:
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available() -> bool:
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available() -> bool:
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available() -> bool:
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default.")
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
| 99 |
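# Example (illustrative): how a user typically reaches these backends, via
# `Trainer.hyperparameter_search`. Assumes `optuna` is installed; the search
# space below is a hypothetical placeholder and the call is left commented out
# because it needs a configured `trainer`.
def my_hp_space(trial):
    # An Optuna `trial` object maps to a dict of hyperparameters to try.
    return {"learning_rate": trial.suggest_float("learning_rate", 1e-5, 1e-3, log=True)}

# best_run = trainer.hyperparameter_search(
#     hp_space=my_hp_space, backend="optuna", n_trials=10, direction="minimize"
# )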
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs):
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn), desc=str(ds.len_file), )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
| 63 | 0 |
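# Example (illustrative): the core length trick used above, shown on a
# synthetic batch - counting non-pad tokens per row with `ne(pad).sum(1)`.
# Requires only `torch`.
import torch

pad_id = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0], [5, 0, 0, 0, 0]])
lens = input_ids.ne(pad_id).sum(1).tolist()
assert lens == [3, 1]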
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__magic_name__ = logging.get_logger(__name__) # pylint: disable=invalid-name
__magic_name__ = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Union[PIL.Image.Image, np.ndarray]
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
super().__init__()
self.register_modules(
prior=lowerCAmelCase__ , image_encoder=lowerCAmelCase__ , image_processor=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , renderer=lowerCAmelCase__ , )
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
if latents is None:
__SCREAMING_SNAKE_CASE = randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=lowerCAmelCase__)
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
__SCREAMING_SNAKE_CASE = latents.to(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = latents * scheduler.init_noise_sigma
return latents
def snake_case_ ( self , lowerCAmelCase__=0):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""")
__SCREAMING_SNAKE_CASE = torch.device(f"cuda:{gpu_id}")
__SCREAMING_SNAKE_CASE = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase__ , lowerCAmelCase__)
@property
def snake_case_ ( self):
if self.device != torch.device("""meta""") or not hasattr(self.image_encoder , """_hf_hook"""):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowerCAmelCase__ , """_hf_hook""")
and hasattr(module._hf_hook , """execution_device""")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
if isinstance(lowerCAmelCase__ , lowerCAmelCase__) and isinstance(image[0] , torch.Tensor):
__SCREAMING_SNAKE_CASE = torch.cat(lowerCAmelCase__ , axis=0) if image[0].ndim == 4 else torch.stack(lowerCAmelCase__ , axis=0)
if not isinstance(lowerCAmelCase__ , torch.Tensor):
__SCREAMING_SNAKE_CASE = self.image_processor(lowerCAmelCase__ , return_tensors="""pt""").pixel_values[0].unsqueeze(0)
__SCREAMING_SNAKE_CASE = image.to(dtype=self.image_encoder.dtype , device=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.image_encoder(lowerCAmelCase__)["""last_hidden_state"""]
__SCREAMING_SNAKE_CASE = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
__SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(lowerCAmelCase__ , dim=0)
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE = torch.zeros_like(lowerCAmelCase__)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds])
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowerCAmelCase__)
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 2_5 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 4.0 , lowerCAmelCase__ = 6_4 , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , ):
if isinstance(lowerCAmelCase__ , PIL.Image.Image):
__SCREAMING_SNAKE_CASE = 1
elif isinstance(lowerCAmelCase__ , torch.Tensor):
__SCREAMING_SNAKE_CASE = image.shape[0]
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image)):
__SCREAMING_SNAKE_CASE = len(lowerCAmelCase__)
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowerCAmelCase__)}")
__SCREAMING_SNAKE_CASE = self._execution_device
__SCREAMING_SNAKE_CASE = batch_size * num_images_per_prompt
__SCREAMING_SNAKE_CASE = guidance_scale > 1.0
__SCREAMING_SNAKE_CASE = self._encode_image(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
# prior
self.scheduler.set_timesteps(lowerCAmelCase__ , device=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.scheduler.timesteps
__SCREAMING_SNAKE_CASE = self.prior.config.num_embeddings
__SCREAMING_SNAKE_CASE = self.prior.config.embedding_dim
__SCREAMING_SNAKE_CASE = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
__SCREAMING_SNAKE_CASE = latents.reshape(latents.shape[0] , lowerCAmelCase__ , lowerCAmelCase__)
for i, t in enumerate(self.progress_bar(lowerCAmelCase__)):
# expand the latents if we are doing classifier free guidance
__SCREAMING_SNAKE_CASE = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__SCREAMING_SNAKE_CASE = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.prior(
lowerCAmelCase__ , timestep=lowerCAmelCase__ , proj_embedding=lowerCAmelCase__ , ).predicted_image_embedding
# remove the variance
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = noise_pred.split(
scaled_model_input.shape[2] , dim=2) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = noise_pred.chunk(2)
__SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
__SCREAMING_SNAKE_CASE = self.scheduler.step(
lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = []
for i, latent in enumerate(lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.renderer.decode(
latent[None, :] , lowerCAmelCase__ , size=lowerCAmelCase__ , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = torch.stack(lowerCAmelCase__)
if output_type not in ["np", "pil"]:
raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")
__SCREAMING_SNAKE_CASE = images.cpu().numpy()
if output_type == "pil":
__SCREAMING_SNAKE_CASE = [self.numpy_to_pil(lowerCAmelCase__) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowerCAmelCase__)
| 100 |
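# Example (illustrative): the classifier-free guidance combine used in the
# denoising loop above, demonstrated on dummy tensors. The batch stacks
# [unconditional, conditional] predictions and `guidance_scale` extrapolates
# between them.
import torch

guidance_scale = 3.0
noise_pred = torch.randn(2, 4, 8)  # stacked (uncond, cond) predictions
noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
assert guided.shape == noise_pred_cond.shape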
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 63 | 0 |
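# Example (illustrative): a simplified re-implementation of the rule the tests
# above exercise - every component with a PyTorch ".bin" weight must also ship
# a ".safetensors" counterpart. A sketch of the idea, not the exact library
# function.
def simple_safetensors_compatible(filenames: list) -> bool:
    # The torch and safetensors files use different base names, so compare
    # per-component directories rather than full stems.
    bin_dirs = {f.split("/")[0] for f in filenames if f.endswith(".bin")}
    safe_dirs = {f.split("/")[0] for f in filenames if f.endswith(".safetensors")}
    return bin_dirs <= safe_dirs


assert simple_safetensors_compatible(
    ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
)
assert not simple_safetensors_compatible(["unet/diffusion_pytorch_model.bin"])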
from ...configuration_utils import PretrainedConfig

NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 101 |
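# Example (illustrative): the standard save/load round trip every
# PretrainedConfig subclass, including NezhaConfig above, supports. Assumes
# `transformers` is installed.
import tempfile

from transformers import NezhaConfig

nezha_cfg = NezhaConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
with tempfile.TemporaryDirectory() as tmp_dir:
    nezha_cfg.save_pretrained(tmp_dir)
    reloaded = NezhaConfig.from_pretrained(tmp_dir)
assert reloaded.hidden_size == 128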
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit uppercase hex representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 0 |
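# Example (illustrative): the standard library implements the same RFC 3548
# coding; handy as a cross-check for hand-rolled base16 functions.
import base64

assert base64.b16encode(b"hi").decode() == "6869"
assert base64.b16decode("6869") == b"hi"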
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang: str = "ro", tgt_lang: str = "en", dataset: str = "wmt16", save_dir: str = None) -> None:
    """Download a translation dataset with the `datasets` package and save line-aligned source/target files."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError('''run pip install datasets''' )
    pair = f"""{src_lang}-{tgt_lang}"""
    print(f"""Converting {dataset}-{pair}""" )
    ds = datasets.load_dataset(dataset , pair )
    if save_dir is None:
        save_dir = f"""{dataset}-{pair}"""
    save_dir = Path(save_dir )
    save_dir.mkdir(exist_ok=True )
    for split in ds.keys():
        print(f"""Splitting {split} with {ds[split].num_rows} records""" )
        # to save to val.source, val.target like summary datasets
        fn = '''val''' if split == '''validation''' else split
        src_path = save_dir.joinpath(f"""{fn}.source""" )
        tgt_path = save_dir.joinpath(f"""{fn}.target""" )
        src_fp = src_path.open('''w+''' )
        tgt_fp = tgt_path.open('''w+''' )
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split] ):
            ex = x['''translation''']
            src_fp.write(ex[src_lang] + '''\n''' )
            tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 102 |
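# Example (illustrative): the file layout the script above produces,
# reproduced on an in-memory toy corpus - one `.source` and one `.target`
# file with line-aligned sentence pairs.
from pathlib import Path
import tempfile

pairs = [("hello", "salut"), ("thanks", "mersi")]
with tempfile.TemporaryDirectory() as tmp:
    src = Path(tmp) / "train.source"
    tgt = Path(tmp) / "train.target"
    src.write_text("\n".join(s for s, _ in pairs) + "\n", encoding="utf-8")
    tgt.write_text("\n".join(t for _, t in pairs) + "\n", encoding="utf-8")
    assert len(src.read_text(encoding="utf-8").splitlines()) == len(pairs)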
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def get_training_setup(accelerator, sched=False):
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1E-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model , ddp_opt , ddp_sched , dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model , ddp_model , True , iteration )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model , ddp_model , dataloader = get_training_setup(accelerator )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator , False )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model , opt , sched , dataloader , ddp_model , ddp_opt , ddp_sched = get_training_setup(accelerator , True )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model , input , target , accelerator , False )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader )):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader ))
        if accelerator.num_processes > 1:
            check_model_parameters(model , ddp_model , did_step , iteration )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80 )
    first_dataloader = DataLoader(first_dset , batch_size=16 )
    second_dset = RegressionDataset(length=96 )
    second_dataloader = DataLoader(second_dset , batch_size=16 )
    first_dataloader , second_dataloader = accelerator.prepare(first_dataloader , second_dataloader )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(first_dataloader )
        if iteration < len(first_dataloader ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(second_dataloader )
                    if batch_num < len(second_dataloader ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**" )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**" )
        test_noop_sync(accelerator )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**" )
        test_distributed_sync(accelerator )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
                test_gradient_accumulation(split_batch , dispatch_batches )
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch , dispatch_batches )


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 63 | 0 |
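# Example (illustrative): the behaviour the tests above verify, in plain
# PyTorch - gradients from `accum_steps` micro-batches are summed before a
# single optimizer step, with the loss scaled down so the update matches one
# big batch.
import torch
from torch import nn

model = nn.Linear(4, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
accum_steps = 2
data = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(4)]
for step, (x, y) in enumerate(data):
    loss = nn.functional.mse_loss(model(x), y) / accum_steps
    loss.backward()  # gradients accumulate across micro-batches
    if (step + 1) % accum_steps == 0:
        opt.step()
        opt.zero_grad()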
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/mbart-large-en-ro''': (
            '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
        ),
        '''facebook/mbart-large-cc25''': (
            '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/mbart-large-en-ro''': 1024,
    '''facebook/mbart-large-cc25''': 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, additional_special_tokens=None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , tokenizer_file=tokenizer_file , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens])
        self._src_lang = src_lang if src_lang is not None else '''en_XX'''
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''')
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text , out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        out_string = ''''''.join(tokens).replace(SPIECE_UNDERLINE , ''' ''').strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file , '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
| 103 |
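# Example (illustrative): the id-offset trick from the tokenizer above on toy
# numbers - four fairseq specials occupy ids 0-3, so every SentencePiece id is
# shifted by 1 (re-using the <unk> slot spm reserves at 0). The spm ids below
# are hypothetical.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1
spm_ids = {"▁hello": 100, "▁world": 250}
aligned = {tok: i + fairseq_offset for tok, i in spm_ids.items()}
assert aligned["▁hello"] == 101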
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        'num_attention_heads': 'decoder_attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'decoder_layers',
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 63 | 0 |
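# Example (illustrative): instantiating a small decoder config like the one
# above. TrOCR is decoder-only and normally paired with a vision encoder,
# which is why `decoder_start_token_id` matters. Assumes `transformers` is
# installed.
from transformers import TrOCRConfig

dec_cfg = TrOCRConfig(d_model=256, decoder_layers=2, decoder_attention_heads=4)
assert dec_cfg.hidden_size == 256  # via the attribute_map above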
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
    def __init__( self ,parent ,batch_size=1_3 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=9_9 ,hidden_size=3_2 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=3_7 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=5_1_2 ,type_vocab_size=1_6 ,type_sequence_label_size=2 ,initializer_range=0.0_2 ,relative_attention=False ,position_biased_input=True ,pos_att_type="None" ,num_labels=3 ,num_choices=4 ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return DebertaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,pos_att_type=self.pos_att_type ,)
    def get_pipeline_config( self ):
        config = self.get_config()
        config.vocab_size = 3_0_0
        return config
    def check_loss_output( self ,result ):
        self.parent.assertListEqual(list(result.loss.size() ) ,[] )
    def create_and_check_deberta_model(
        self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = DebertaModel(config=config )
        model.to(torch_device )
        model.eval()
        sequence_output = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids ,token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) ,[self.batch_size, self.seq_length, self.hidden_size] )
    def create_and_check_deberta_for_masked_lm(
        self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = DebertaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_deberta_for_sequence_classification(
        self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=sequence_labels )
        self.parent.assertListEqual(list(result.logits.size() ) ,[self.batch_size, self.num_labels] )
        self.check_loss_output(result )
    def create_and_check_deberta_for_token_classification(
        self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_deberta_for_question_answering(
        self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = DebertaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,token_type_ids=token_type_ids ,start_positions=sequence_labels ,end_positions=sequence_labels ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': DebertaModel,
            'fill-mask': DebertaForMaskedLM,
            'question-answering': DebertaForQuestionAnswering,
            'text-classification': DebertaForSequenceClassification,
            'token-classification': DebertaForTokenClassification,
            'zero-shot': DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp( self ):
        self.model_tester = DebertaModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=DebertaConfig ,hidden_size=3_7 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_deberta_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase ):
    """simple docstring"""
    @unittest.skip(reason='''Model not available yet''' )
    def test_inference_masked_lm( self ):
        pass
    @slow
    def test_inference_no_head( self ):
        model = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
        input_ids = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids ,attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,expected_slice ,atol=1e-4 ) ,F"{output[:, 1:4, 1:4]}" )
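# Hedged sketch (editor addition, not in the original tests): driving the model
# tester by hand, outside unittest. Assumes torch is available; `_Parent` is a
# minimal stand-in for the assertion methods the tester expects.
class _Parent:
    def assertListEqual( self , a , b ):
        assert list(a ) == list(b )
    def assertEqual( self , a , b ):
        assert a == b
def _run_model_tester_by_hand():
    tester = DebertaModelTester(_Parent() )
    config_and_inputs = tester.prepare_config_and_inputs()
    tester.create_and_check_deberta_model(*config_and_inputs )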
| 104 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(R'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(R'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping( fname , overwrite = False ):
    with open(fname , "r" , encoding="utf-8" ) as f:
        content = f.read()
    lines = content.split("\n" )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(r"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "(" ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")" ):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks , key=lambda x : _re_identifier.search(x ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname , "w" , encoding="utf-8" ) as f:
            f.write("\n".join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings( overwrite = False ):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE , f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith(".py" )]
    diffs = [sort_auto_mapping(fname , overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames , diffs ) if d]
        raise ValueError(
            F'The following files have auto mappings that need sorting: {", ".join(failures )}. Run `make style` to fix'
            " this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_all_auto_mappings(not args.check_only)
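# Hedged usage sketch (editor addition): exercising sort_auto_mapping on a
# throwaway file. The miniature mapping below is made up but follows the
# 8-space entry indentation the parser expects.
def _demo_sort_auto_mapping():
    import tempfile
    demo = (
        "DEMO_MAPPING_NAMES = OrderedDict(\n"
        "    [\n"
        '        ("zebra", "Z"),\n'
        '        ("ant", "A"),\n'
        "    ]\n"
        ")\n"
    )
    with tempfile.NamedTemporaryFile("w" , suffix=".py" , delete=False ) as tmp:
        tmp.write(demo )
    # out-of-order entries with overwrite=False -> returns True
    assert sort_auto_mapping(tmp.name , overwrite=False ) is True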
| 63 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True )
class InputExample :
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True )
class InputFeatures :
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset( Dataset ):
    features: List[InputFeatures]
    def __init__( self , data_dir , tokenizer , task , max_seq_length = None , overwrite_cache=False , evaluate = False , ) -> Optional[int]:
        processor = hans_processors[task]()
        cached_features_file = os.path.join(
            data_dir , "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(max_seq_length ) , task , ) , )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not overwrite_cache:
                logger.info(f"""Loading features from cached file {cached_features_file}""" )
                self.features = torch.load(cached_features_file )
            else:
                logger.info(f"""Creating features from dataset file at {data_dir}""" )
                examples = (
                    processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
                )
                logger.info("Training examples: %s" , len(examples ) )
                self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
                logger.info("Saving features into cached file %s" , cached_features_file )
                torch.save(self.features , cached_features_file )
    def __len__( self ) -> int:
        return len(self.features )
    def __getitem__( self , i ) -> InputFeatures:
        return self.features[i]
    def get_labels( self ) -> Optional[Any]:
        return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset :
    features: List[InputFeatures]
    def __init__( self , data_dir , tokenizer , task , max_seq_length = 128 , overwrite_cache=False , evaluate = False , ) -> Dict:
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
        self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
        def gen():
            for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
                if ex_index % 1_0000 == 0:
                    logger.info("Writing example %d of %d" % (ex_index, len(examples )) )
                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )
        self.dataset = tf.data.Dataset.from_generator(
            gen , (
                {
                    "example_id": tf.int32,
                    "input_ids": tf.int32,
                    "attention_mask": tf.int32,
                    "token_type_ids": tf.int32,
                },
                tf.int64,
            ) , (
                {
                    "example_id": tf.TensorShape([] ),
                    "input_ids": tf.TensorShape([None, None] ),
                    "attention_mask": tf.TensorShape([None, None] ),
                    "token_type_ids": tf.TensorShape([None, None] ),
                },
                tf.TensorShape([] ),
            ) , )
    def get_dataset( self ) -> Tuple:
        return self.dataset
    def __len__( self ) -> Tuple:
        return len(self.features )
    def __getitem__( self , i ) -> InputFeatures:
        return self.features[i]
    def get_labels( self ) -> Optional[int]:
        return self.label_list
class HansProcessor( DataProcessor ):
    def get_train_examples( self , data_dir ) -> Any:
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_train_set.txt" ) ) , "train" )
    def get_dev_examples( self , data_dir ) -> List[str]:
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_evaluation_set.txt" ) ) , "dev" )
    def get_labels( self ) -> Optional[int]:
        return ["contradiction", "entailment", "neutral"]
    def _create_examples( self , lines , set_type ) -> Tuple:
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex" ) else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features( examples: List[InputExample] , label_list: List[str] , max_length: int , tokenizer: PreTrainedTokenizer , ) ->List[str]:
    '''simple docstring'''
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc="convert examples to features" ):
        if ex_index % 1_0000 == 0:
            logger.info("Writing example %d" % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding="max_length" , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
    for i, example in enumerate(examples[:5] ):
        logger.info("*** Example ***" )
        logger.info(F"""guid: {example}""" )
        logger.info(F"""features: {features[i]}""" )
    return features
hans_tasks_num_labels = {
    '''hans''': 3,
}
hans_processors = {
    '''hans''': HansProcessor,
}
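# Hedged sketch (editor addition, not part of the original module): the label
# bookkeeping above in isolation. The swap mirrors the RoBERTa hack and the
# mapping mirrors hans_convert_examples_to_features.
def _demo_label_map():
    label_list = HansProcessor().get_labels()
    label_list[1], label_list[2] = label_list[2], label_list[1]  # RoBERTa-style swap
    label_map = {label: i for i, label in enumerate(label_list )}
    assert label_map == {"contradiction": 0, "neutral": 1, "entailment": 2}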
| 105 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ : int = logging.get_logger(__name__)
lowerCAmelCase_ : Tuple = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='big_bird'
def __init__( self : Optional[int] , __a : Dict=5_03_58 , __a : str=7_68 , __a : List[Any]=12 , __a : List[str]=12 , __a : Union[str, Any]=30_72 , __a : str="gelu_new" , __a : Dict=0.1 , __a : Union[str, Any]=0.1 , __a : Any=40_96 , __a : int=2 , __a : Tuple=0.02 , __a : List[Any]=1e-1_2 , __a : int=True , __a : List[str]=0 , __a : Tuple=1 , __a : Optional[Any]=2 , __a : Tuple=66 , __a : str="block_sparse" , __a : Tuple=True , __a : Optional[int]=False , __a : str=64 , __a : Tuple=3 , __a : Any=None , **__a : Dict , ):
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , sep_token_id=__a , **__a , )
_a = vocab_size
_a = max_position_embeddings
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = initializer_range
_a = type_vocab_size
_a = layer_norm_eps
_a = use_cache
_a = rescale_embeddings
_a = attention_type
_a = use_bias
_a = block_size
_a = num_random_blocks
_a = classifier_dropout
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
@property
def UpperCamelCase__ ( self : Optional[int] ):
if self.task == "multiple-choice":
_a = {0: "batch", 1: "choice", 2: "sequence"}
else:
_a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 63 | 0 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type( model_name_or_path ):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths( metric_fn , prediction , ground_truths ):
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
def get_scores( args , preds_path , gold_data_path ):
    hypos = [line.strip() for line in open(preds_path , '''r''' ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep='''\t''' , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , '''r''' ).readlines()]
        answers = [[reference] for reference in references]
    fa = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        fa += metric_max_over_ground_truths(fa_score , prediction , ground_truths )
    em = 100.0 * em / total
    fa = 100.0 * fa / total
    logger.info(f'F1: {fa:.2f}' )
    logger.info(f'EM: {em:.2f}' )
def get_precision_at_k( args , preds_path , gold_data_path ):
    k = args.k
    hypos = [line.strip() for line in open(preds_path , '''r''' ).readlines()]
    references = [line.strip() for line in open(gold_data_path , '''r''' ).readlines()]
    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split('''\t''' )[:k] )
        ref_provenance = set(reference.split('''\t''' ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em = 100.0 * em / total
    logger.info(f'Precision@{k}: {em: .2f}' )
def evaluate_batch_retrieval( args , rag_model , questions ):
    def strip_title( title ):
        if title.startswith('''"''' ):
            title = title[1:]
        if title.endswith('''"''' ):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors='''pt''' , padding=True , truncation=True , )['''input_ids'''].to(args.device )
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs['''title''']]
        provenance_strings.append('''\t'''.join(provenance ) )
    return provenance_strings
def evaluate_batch_eae( args , rag_model , questions ):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors='''pt''' , padding=True , truncation=True )
        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate( # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )
        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info('''Q: {} - A: {}'''.format(q , a ) )
        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=str , help=(
            '''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
            ''' model_name_or_path'''
        ) , )
    parser.add_argument(
        '''--index_name''' , default=None , choices=['''exact''', '''compressed''', '''legacy'''] , type=str , help='''RAG model retriever type''' , )
    parser.add_argument(
        '''--index_path''' , default=None , type=str , help='''Path to the retrieval index''' , )
    parser.add_argument('''--n_docs''' , default=5 , type=int , help='''Number of retrieved docs''' )
    parser.add_argument(
        '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=str , help=(
            '''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
            ''' precision@k.'''
        ) , )
    parser.add_argument('''--k''' , default=1 , type=int , help='''k for the precision@k calculation''' )
    parser.add_argument(
        '''--evaluation_set''' , default=None , type=str , required=True , help='''Path to a file containing evaluation samples''' , )
    parser.add_argument(
        '''--gold_data_path''' , default=None , type=str , required=True , help='''Path to a tab-separated file with gold samples''' , )
    parser.add_argument(
        '''--gold_data_mode''' , default='''qa''' , type=str , choices=['''qa''', '''ans'''] , help=(
            '''Format of the gold data file'''
            '''qa - a single line in the following format: question [tab] answer_list'''
            '''ans - a single line of the gold file contains the expected answer string'''
        ) , )
    parser.add_argument(
        '''--predictions_path''' , type=str , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
    parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
    parser.add_argument(
        '''--eval_batch_size''' , default=8 , type=int , help='''Batch size per GPU/CPU for evaluation.''' , )
    parser.add_argument(
        '''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
    parser.add_argument(
        '''--num_beams''' , default=4 , type=int , help='''Number of beams to be used when generating answers''' , )
    parser.add_argument('''--min_length''' , default=1 , type=int , help='''Min length of the generated answers''' )
    parser.add_argument('''--max_length''' , default=50 , type=int , help='''Max length of the generated answers''' )
    parser.add_argument(
        '''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
    parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , )
    args = parser.parse_args()
    args.device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    return args
def main( args ):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith('''rag''' ):
        model_class = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
        model_kwargs['''n_docs'''] = args.n_docs
        if args.index_name is not None:
            model_kwargs['''index_name'''] = args.index_name
        if args.index_path is not None:
            model_kwargs['''index_path'''] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info('''Evaluate the following checkpoints: %s''' , checkpoints )
    score_fn = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info('''***** Running evaluation for {} *****'''.format(checkpoint ) )
        logger.info(''' Batch size = %d''' , args.eval_batch_size )
        logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
        if args.model_type.startswith('''rag''' ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write('''\n'''.join(answers ) + '''\n''' )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write('''\n'''.join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    args = get_args()
    main(args)
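# Hedged example invocation (editor addition). The flags are the ones defined
# in get_args above; the file name, model id and paths are placeholders.
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --model_type rag_token \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path path/to/predictions.txt \
#       --eval_mode e2e \
#       --n_docs 5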
| 106 |
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin , ConfigMixin ):
    """simple docstring"""
    @register_to_config
    def __init__( self , *,
        clip_extra_context_tokens = 4 , clip_embeddings_dim = 7_68 , time_embed_dim , cross_attention_dim , ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim ) )
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim , time_embed_dim )
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim , time_embed_dim )
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim , cross_attention_dim )
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim )
    def forward( self , *, image_embeddings , prompt_embeds , text_encoder_hidden_states , do_classifier_free_guidance ):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size , -1 )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds )
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings )
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings )
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size , -1 , self.clip_extra_context_tokens )
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0 , 2 , 1 )
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states )
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states )
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
        return text_encoder_hidden_states, additive_clip_time_embeddings
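# Hedged shape check (editor addition, not part of the original module): tiny
# made-up dimensions, tracing the two outputs of the projection model above.
def _demo_text_proj_shapes():
    model = UnCLIPTextProjModel(
        clip_extra_context_tokens=4 , clip_embeddings_dim=32 , time_embed_dim=16 , cross_attention_dim=32 )
    image_embeddings = torch.randn(2 , 32 )
    prompt_embeds = torch.randn(2 , 32 )
    text_encoder_hidden_states = torch.randn(2 , 7 , 32 )
    hidden_states , time_embeddings = model(
        image_embeddings=image_embeddings , prompt_embeds=prompt_embeds , text_encoder_hidden_states=text_encoder_hidden_states , do_classifier_free_guidance=False , )
    assert hidden_states.shape == (2 , 4 + 7 , 32 )  # extra tokens + text tokens
    assert time_embeddings.shape == (2 , 16 )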
| 63 | 0 |
def gray_code( bit_count: int ):
    '''simple docstring'''
    if bit_count < 0:
        raise ValueError("The given input must be positive" )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    #
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i], 2 )
    return sequence
def gray_code_sequence_string( bit_count: int ):
    '''simple docstring'''
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        sequence.append("0" + smaller_sequence[i] )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        sequence.append("1" + smaller_sequence[i] )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
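# Hedged cross-check (editor addition): the classic closed form g(i) = i ^ (i >> 1)
# reproduces the reflected construction above.
def _check_gray_code_closed_form(bit_count: int = 4 ):
    expected = [i ^ (i >> 1) for i in range(1 << bit_count )]
    assert gray_code(bit_count ) == expected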
| 107 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ):
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F'val_{metric}' , mode="max" , save_top_k=3 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience ):
    return EarlyStopping(
        monitor=F'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback(pl.Callback ):
    """simple docstring"""
    def on_batch_end( self , trainer , pl_module ):
        lrs = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
    @rank_zero_only
    def _write_logs( self , trainer , pl_module , type_path , save_generations=True ):
        logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , "a+" ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = f'{key}: {val:.6f}\n'
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"] )
            generations_file.open("w+" ).write(content )
    @rank_zero_only
    def on_train_start( self , trainer , pl_module ):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
    @rank_zero_only
    def on_test_end( self , trainer , pl_module ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , "test" )
    @rank_zero_only
    def on_validation_end( self , trainer , pl_module ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 63 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
    '''linear''': get_linear_schedule_with_warmup,
    '''cosine''': get_cosine_schedule_with_warmup,
    '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
    '''polynomial''': get_polynomial_decay_schedule_with_warmup,
    '''constant''': get_constant_schedule,
    '''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer( Trainer ):
    """simple docstring"""
    def __init__( self , config=None , data_args=None , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f""" {self.model.__class__}"""
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
                " padding.." )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self , num_training_steps ):
        """simple docstring"""
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
    def _get_lr_scheduler( self , num_training_steps ):
        """simple docstring"""
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler( self ):
"""simple docstring"""
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss( self , model , inputs , labels ):
        """simple docstring"""
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss , logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss , _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss( self , model , inputs ):
        """simple docstring"""
        labels = inputs.pop("labels" )
        loss , _ = self._compute_loss(model , inputs , labels )
        return loss
    def prediction_step( self , model , inputs , prediction_loss_only , ignore_keys = None , ):
        """simple docstring"""
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs["max_length"] )
        labels = inputs.pop("labels" )
        with torch.no_grad():
            # compute loss on predict data
            loss , logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs["max_length"] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self , tensor , max_length ):
        """simple docstring"""
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f""" padded to `max_length`={max_length}""" )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
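# Hedged standalone check (editor addition): the padding logic of
# _pad_tensors_to_max_len in isolation, with a toy tensor and pad id 0.
def _demo_pad_to_max_len():
    tensor = torch.tensor([[5, 6, 7]] )
    padded = 0 * torch.ones((1, 5) , dtype=tensor.dtype )
    padded[:, : tensor.shape[-1]] = tensor
    assert padded.tolist() == [[5, 6, 7, 0, 0]]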
| 108 |
'''simple docstring'''
import math
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def UpperCamelCase__ ( self : List[str] , __a : list[list[float]] , __a : list[int] ):
_a = 0.0
_a = 0.0
for i in range(len(__a ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def UpperCamelCase__ ( self : List[Any] , __a : list[list[int | float]] , __a : list[int] , __a : int , __a : float ):
for i in range(len(__a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def _lowerCamelCase ( ) -> None:
# Training Examples ( m, n )
_a = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_a = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_a = SelfOrganizingMap()
_a = 3
_a = 0.5
for _ in range(lowercase ):
for j in range(len(lowercase ) ):
# training sample
_a = training_samples[j]
# Compute the winning vector
_a = self_organizing_map.get_winner(lowercase , lowercase )
# Update the winning vector
_a = self_organizing_map.update(lowercase , lowercase , lowercase , lowercase )
# classify test sample
_a = [0, 0, 0, 1]
_a = self_organizing_map.get_winner(lowercase , lowercase )
# results
print(F'Clusters that the test sample belongs to : {winner}' )
print(F'Weights that have been trained : {weights}' )
# running the main() function
if __name__ == "__main__":
main()
| 63 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
A: int = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor( ChineseCLIPImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use ChineseCLIPImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
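# Editor note (hedged, not in the original file): the subclass above only emits
# a FutureWarning and forwards everything to ChineseCLIPImageProcessor, so new
# code can instantiate the parent class directly with the same arguments.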
| 109 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin ):
    """simple docstring"""
    attributes =['image_processor', 'tokenizer']
    image_processor_class ='OwlViTImageProcessor'
    tokenizer_class =('CLIPTokenizer', 'CLIPTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , query_images=None , images=None , padding="max_length" , return_tensors="np" , **kwargs ):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("Target return tensor type could not be returned" )
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process( self , *args , **kwargs ):
        return self.image_processor.post_process(*args , **kwargs )
    def post_process_object_detection( self , *args , **kwargs ):
        return self.image_processor.post_process_object_detection(*args , **kwargs )
    def post_process_image_guided_detection( self , *args , **kwargs ):
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
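# Hedged usage sketch (editor addition): the typical call pattern for this
# processor; the model id is real but the `image` variable is a placeholder.
#
#   from transformers import OwlViTProcessor
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=image, return_tensors="pt")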
| 63 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """simple docstring"""
    url = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    return image
def create_rename_keys( config ):
    """simple docstring"""
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias( state_dict , config ):
    """simple docstring"""
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' )
        v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
def get_blipa_config( model_name , eos_token_id ):
    """simple docstring"""
    image_size = 3_64 if '''coco''' in model_name else 2_24
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
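# Hedged standalone check (editor addition): rename_key above is a plain dict
# pop-and-reinsert; here it is exercised on a toy state dict.
def _demo_rename_key():
    toy = {"ln_vision.weight": 1}
    rename_key(toy , "ln_vision.weight" , "vision_model.post_layernorm.weight" )
    assert toy == {"vision_model.post_layernorm.weight": 1}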
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original BLIP-2 weights into the transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b')
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl')
    )
    eos_token_id = tokenizer('\n', add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)
    hf_model = BlipaForConditionalGeneration(config).eval()
    model_name_to_original = {
        'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
        'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
        'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
        'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
        'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
        'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
        'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
    }
    name, type = model_name_to_original[model_name]
    # load original model
    print('Loading original model...')
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device)
    original_model.eval()
    print('Done!')
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('Qformer.bert'):
            key = key.replace('Qformer.bert', 'qformer')
        if "attention.self" in key:
            key = key.replace('self', 'attention')
        if "opt_proj" in key:
            key = key.replace('opt_proj', 'language_projection')
        if "t5_proj" in key:
            key = key.replace('t5_proj', 'language_projection')
        if key.startswith('opt'):
            key = key.replace('opt', 'language')
        if key.startswith('t5'):
            key = key.replace('t5', 'language')
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors['eval'](image).unsqueeze(0).to(device)
    input_ids = tokenizer(['\n'], return_tensors='pt').input_ids.to(device)
    # create processor (OPENAI_CLIP_MEAN/STD come from transformers.utils.constants)
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors='pt').pixel_values.to(device)
    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)
    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': ['']}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']}).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits
    assert original_logits.shape == logits.shape
    print('First values of original logits:', original_logits[0, :3, :3])
    print('First values of HF logits:', logits[0, :3, :3])
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device)
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print('Looks ok!')
    print('Generating a caption...')
    prompt = ''
    input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(device)
    original_outputs = original_model.generate({'image': original_pixel_values})
    outputs = hf_model.generate(
        pixel_values, input_ids, do_sample=False, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, )
    print('Original generation:', original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print('HF generation:', output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f'nielsr/{model_name}')
        hf_model.push_to_hub(f'nielsr/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        'blip2-opt-2.7b',
        'blip2-opt-6.7b',
        'blip2-opt-2.7b-coco',
        'blip2-opt-6.7b-coco',
        'blip2-flan-t5-xl',
        'blip2-flan-t5-xl-coco',
        'blip2-flan-t5-xxl',
    ]
    parser.add_argument(
        '--model_name',
        default='blip2-opt-2.7b',
        choices=choices,
        type=str,
        help='Path to hf config.json of model to convert',
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether to push the model and processor to the hub after converting',
    )
    args = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
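# Usage sketch (hedged): the script filename below is an assumption, and running the
# conversion also requires the original LAVIS package so load_model_and_preprocess can
# fetch the reference weights.
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./converted/blip2-opt-2.7b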
| 110 |
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as strings: ["1", "1/2", ...]."""
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term)):
        series.append(f'1/{temp + 1}' if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
| 63 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bartpho'] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
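# Note on the pattern above (an added comment, not original code): _LazyModule defers the
# sentencepiece-backed import, so `import transformers` stays cheap and BartphoTokenizer is
# only materialized when an attribute of this module is first accessed.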
| 122 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!", )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.", )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.", )
    parser.add_argument(
        "--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.", )
    parser.add_argument(
        "--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local", )
    parser.add_argument(
        "--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.", )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes." )
    parser.add_argument(
        "--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.", )
    parser.add_argument(
        "--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)", )
    parser.add_argument(
        "--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.", )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.", )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.", )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py", )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.", )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project)
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local.")
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    """Sum the per-shard sample counts encoded in the tfrecord file names."""
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f'No .tfrecord files found in {args.train_dataset}.')
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.')
    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf")

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )
    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))
    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )
    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
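# Usage sketch (hedged): flag values are illustrative, and the gs:// paths are hypothetical
# buckets of shards produced beforehand by prepare_tfrecord_shards.py.
#
#   python run_mlm.py --tpu_name local --bfloat16 \
#       --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
#       --tokenizer unigram-tokenizer-wikitext --output_dir ./mlm_checkpoints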
| 63 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class snake_case_ (unittest.TestCase ):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ''
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained('distilgpt2')
        model = AutoModelForCausalLM.from_pretrained('distilgpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors='pt')
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
        model = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an Empty exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ''
            for new_text in streamer:
                streamer_text += new_text
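# Minimal consumer pattern the iterator tests above exercise (a hedged sketch, not part of
# the original test file): generation runs on a background thread while the main thread
# drains decoded text chunks from the streamer as they arrive.
#
#   streamer = TextIteratorStreamer(tokenizer)
#   Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer}).start()
#   for chunk in streamer:
#       print(chunk, end="")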
| 240 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE (ProcessorMixin ):
    """simple docstring"""

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv3ImageProcessor'
    tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel values
        pixel_values = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            pixel_values = self.get_overflowing_images(pixel_values, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = pixel_values
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f' {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}')
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
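# Usage sketch (hedged; the checkpoint name is illustrative): with apply_ocr=True in the
# image processor, words and boxes come from built-in OCR, so only images need to be passed.
#
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   encoding = processor(images=image, return_tensors="pt")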
| 63 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    model_type = 'codegen'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False, ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, """pad_token_id""", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="""inputs""")
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
            else:
                import torch

                batch, seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
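# Usage sketch (hedged): generating ONNX dummy inputs with past key values enabled; the
# `tokenizer` variable is assumed to be any CodeGen-compatible tokenizer.
#
#   onnx_config = CodeGenOnnxConfig(CodeGenConfig(), use_past=True)
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)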
| 294 |
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE:
    """simple docstring"""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        # Mirror the wrapped config's attributes, then record the modal hidden size.
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 63 | 0 |
def perfect_cube(n: int) -> bool:
    '''Check whether a non-negative integer is a perfect cube.'''
    # Round the float cube root before comparing: without rounding,
    # 27 ** (1 / 3) evaluates to 3.0000000000000004 and the check fails.
    val = round(n ** (1 / 3))
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
| 68 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 63 | 0 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f'''{output[:, 1:4, 1:4]}''')
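# Running these tests (hedged; the path assumes the usual transformers repository layout):
#   pytest tests/models/deberta/test_modeling_deberta.py -k "deberta"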
| 157 |
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    """
    Return True if the decimal digits of num read the same forwards and backwards.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 0 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
'examples': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc", "https://huggingface.co/docs/diffusers/model_doc", )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = F"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(F"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(F"Updating version to {version}.")
    global_version_update(version, patch=patch)
def post_release_work():
    current_version = get_version()
    dev_version = F"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(F"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
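# Usage sketch (hedged; assumes the script lives at utils/release.py in the diffusers repo,
# matching the src/diffusers paths in REPLACE_FILES above):
#   python utils/release.py                  # prepare a minor release
#   python utils/release.py --patch          # prepare a patch release
#   python utils/release.py --post_release   # move back to a .dev0 version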
| 298 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_neox_fast'] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox'] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 63 | 0 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix):
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''')
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''')
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''')
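# Quick sanity check (a hedged demo, not original code): the inverse of 2*I is 0.5*I.
if __name__ == "__main__":
    print(inverse_of_matrix([[2.0, 0.0], [0.0, 2.0]]))  # [[0.5, 0.0], [0.0, 0.5]]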
| 9 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('fixtures')
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SAMPLE_CONFIG = get_tests_dir('fixtures/dummy-config.json')
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = WavaVecaFeatureExtractor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            config = AutoFeatureExtractor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)
            self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, WavaVecaFeatureExtractor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.", ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False)
        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(WavaVecaConfig, WavaVecaFeatureExtractor)
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self : Tuple ):
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =True
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# If remote code is not set, the default is to use local
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(__a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 63 | 0 |
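A minimal sketch of the register-and-reload pattern these tests exercise. The Custom* names are hypothetical stand-ins, and the round-trip relies on AutoFeatureExtractor resolving registered classes by name; assumes transformers is installed.

import tempfile

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin


class CustomConfig(PretrainedConfig):
    model_type = "custom"


class CustomFeatureExtractor(FeatureExtractionMixin):
    pass


AutoConfig.register("custom", CustomConfig)
AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)

with tempfile.TemporaryDirectory() as tmp_dir:
    CustomFeatureExtractor().save_pretrained(tmp_dir)
    reloaded = AutoFeatureExtractor.from_pretrained(tmp_dir)
    assert isinstance(reloaded, CustomFeatureExtractor)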
"""simple docstring"""
import argparse
import os
import re
__A = 'src/transformers'
# Pattern that looks at the indentation in a line.
__A = re.compile(R"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
__A = re.compile(R"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__A = re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
__A = re.compile(R"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__A = re.compile(R"""\[([^\]]+)\]""")
def __A (_SCREAMING_SNAKE_CASE ) ->Any:
"""simple docstring"""
lowerCAmelCase__ :Tuple = _re_indent.search(_SCREAMING_SNAKE_CASE )
return "" if search is None else search.groups()[0]
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) ->str:
"""simple docstring"""
lowerCAmelCase__ :Any = 0
lowerCAmelCase__ :Any = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_SCREAMING_SNAKE_CASE ):
index += 1
lowerCAmelCase__ :Optional[int] = ['\n'.join(lines[:index] )]
else:
lowerCAmelCase__ :Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase__ :List[str] = [lines[index]]
index += 1
while index < len(_SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(_SCREAMING_SNAKE_CASE )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_SCREAMING_SNAKE_CASE ) )
if index < len(_SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase__ :str = [lines[index + 1]]
index += 1
else:
lowerCAmelCase__ :Optional[int] = []
else:
blocks.append('\n'.join(_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase__ :List[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_SCREAMING_SNAKE_CASE ) > 0:
blocks.append('\n'.join(_SCREAMING_SNAKE_CASE ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_SCREAMING_SNAKE_CASE ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def __A (_SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
def _inner(_SCREAMING_SNAKE_CASE ):
return key(_SCREAMING_SNAKE_CASE ).lower().replace('_' , '' )
return _inner
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
"""simple docstring"""
def noop(_SCREAMING_SNAKE_CASE ):
return x
if key is None:
lowerCAmelCase__ :List[Any] = noop
# Constants are all uppercase, they go first.
lowerCAmelCase__ :List[str] = [obj for obj in objects if key(_SCREAMING_SNAKE_CASE ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase__ :Any = [obj for obj in objects if key(_SCREAMING_SNAKE_CASE )[0].isupper() and not key(_SCREAMING_SNAKE_CASE ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase__ :int = [obj for obj in objects if not key(_SCREAMING_SNAKE_CASE )[0].isupper()]
lowerCAmelCase__ :Optional[int] = ignore_underscore(_SCREAMING_SNAKE_CASE )
return sorted(_SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE ) + sorted(_SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE ) + sorted(_SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE )
def __A (_SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
def _replace(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Any = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
lowerCAmelCase__ :Optional[Any] = [part.strip().replace('\"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase__ :Dict = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(_SCREAMING_SNAKE_CASE )] ) + "]"
lowerCAmelCase__ :List[str] = import_statement.split('\n' )
if len(_SCREAMING_SNAKE_CASE ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase__ :Any = 2 if lines[1].strip() == '[' else 1
lowerCAmelCase__ :List[Any] = [(i, _re_strip_line.search(_SCREAMING_SNAKE_CASE ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCAmelCase__ :Tuple = sort_objects(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x[1] )
lowerCAmelCase__ :Optional[int] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_SCREAMING_SNAKE_CASE ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase__ :str = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase__ :List[str] = [part.strip().replace('\"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase__ :Any = keys[:-1]
lowerCAmelCase__ :Dict = get_indent(lines[1] ) + ', '.join([F"\"{k}\"" for k in sort_objects(_SCREAMING_SNAKE_CASE )] )
return "\n".join(_SCREAMING_SNAKE_CASE )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase__ :Tuple = _re_bracket_content.sub(_replace , _SCREAMING_SNAKE_CASE )
return import_statement
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ) ->str:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f:
lowerCAmelCase__ :List[Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase__ :Union[str, Any] = split_code_in_indented_blocks(
_SCREAMING_SNAKE_CASE , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCAmelCase__ :Any = main_blocks[block_idx]
lowerCAmelCase__ :List[str] = block.split('\n' )
# Get to the start of the imports.
lowerCAmelCase__ :Union[str, Any] = 0
while line_idx < len(_SCREAMING_SNAKE_CASE ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase__ :List[Any] = len(_SCREAMING_SNAKE_CASE )
else:
line_idx += 1
if line_idx >= len(_SCREAMING_SNAKE_CASE ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase__ :Optional[int] = '\n'.join(block_lines[line_idx:-1] )
lowerCAmelCase__ :Union[str, Any] = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
lowerCAmelCase__ :Optional[int] = split_code_in_indented_blocks(_SCREAMING_SNAKE_CASE , indent_level=_SCREAMING_SNAKE_CASE )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase__ :Dict = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase__ :Optional[int] = [(pattern.search(_SCREAMING_SNAKE_CASE ).groups()[0] if pattern.search(_SCREAMING_SNAKE_CASE ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase__ :Any = [(i, key) for i, key in enumerate(_SCREAMING_SNAKE_CASE ) if key is not None]
lowerCAmelCase__ :Any = [x[0] for x in sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase__ :Dict = 0
lowerCAmelCase__ :Optional[int] = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase__ :str = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(_SCREAMING_SNAKE_CASE )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase__ :Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(_SCREAMING_SNAKE_CASE ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(_SCREAMING_SNAKE_CASE ) )
def __A (_SCREAMING_SNAKE_CASE=True ) ->List[str]:
"""simple docstring"""
lowerCAmelCase__ :Optional[Any] = []
for root, _, files in os.walk(_SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
lowerCAmelCase__ :Union[str, Any] = sort_imports(os.path.join(_SCREAMING_SNAKE_CASE , '__init__.py' ) , check_only=_SCREAMING_SNAKE_CASE )
if result:
lowerCAmelCase__ :Dict = [os.path.join(_SCREAMING_SNAKE_CASE , '__init__.py' )]
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(F"Would overwrite {len(_SCREAMING_SNAKE_CASE )} files, run `make style`." )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
__A = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 293 |
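A condensed demo of the three-bucket ordering rule sort_objects implements above: uppercase constants first, CamelCase classes second, lowercase functions last, each bucket compared case-insensitively with underscores ignored (demo-only helper and sample names):

def sort_objects_demo(objects):
    key = lambda name: name.lower().replace("_", "")
    constants = [o for o in objects if o.isupper()]
    classes = [o for o in objects if o[0].isupper() and not o.isupper()]
    functions = [o for o in objects if not o[0].isupper()]
    return sorted(constants, key=key) + sorted(classes, key=key) + sorted(functions, key=key)


print(sort_objects_demo(["load_tool", "Agent", "OPENAI_KEY", "Tool", "agent_types"]))
# ['OPENAI_KEY', 'Agent', 'Tool', 'agent_types', 'load_tool']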
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : int = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='gpt_bigcode'
__a =['past_key_values']
__a ={
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Optional[Any] , __a : Tuple=5_02_57 , __a : str=10_24 , __a : Dict=7_68 , __a : Tuple=12 , __a : str=12 , __a : Optional[int]=None , __a : Dict="gelu_pytorch_tanh" , __a : Tuple=0.1 , __a : Tuple=0.1 , __a : Union[str, Any]=0.1 , __a : Tuple=1e-5 , __a : str=0.02 , __a : Dict=True , __a : Union[str, Any]=True , __a : Optional[int]=5_02_56 , __a : Optional[int]=5_02_56 , __a : Union[str, Any]=True , __a : Dict=True , __a : Union[str, Any]=True , **__a : List[Any] , ):
_a = vocab_size
_a = n_positions
_a = n_embd
_a = n_layer
_a = n_head
_a = n_inner
_a = activation_function
_a = resid_pdrop
_a = embd_pdrop
_a = attn_pdrop
_a = layer_norm_epsilon
_a = initializer_range
_a = scale_attn_weights
_a = use_cache
_a = attention_softmax_in_fpaa
_a = scale_attention_softmax_in_fpaa
_a = multi_query
_a = bos_token_id
_a = eos_token_id
super().__init__(bos_token_id=__a , eos_token_id=__a , **__a )
| 63 | 0 |
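A minimal usage sketch (assumes a transformers release that ships GPT-BigCode) showing how the attribute_map above routes the generic config names to the GPT-2 style ones:

from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(n_embd=768, n_layer=12, n_head=12, multi_query=True)
print(config.hidden_size)        # 768 - alias for n_embd
print(config.num_hidden_layers)  # 12  - alias for n_layer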
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Two-ended linear search: compare both ends of the current window, then
    narrow it inward by one position on each side.

    >>> search([1, 2, 3, 4, 5], 5)
    4
    >>> search([4, 1, 9, 7], 3)
    -1
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 314 |
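Note that despite the signature this is a two-ended linear scan, not a binary search: it compares both ends of the current window and recurses inward, so it works on unsorted data but makes up to about n/2 recursive calls. A quick check against the function above:

print(search([4, 1, 9, 7], 9))  # 2  - found while narrowing inward
print(search([4, 1, 9, 7], 3))  # -1 - window exhausted without a match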
'''simple docstring'''


def perfect_cube(n: int) -> bool:
    # Round the floating-point cube root before comparing: 27 ** (1 / 3)
    # evaluates to 3.0000000000000004, so a raw equality test wrongly fails.
    val = round(abs(n) ** (1 / 3))
    return (val * val * val) == abs(n)


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))   # False
| 63 | 0 |
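A float-free alternative (a sketch; exact even for very large integers) binary-searches the integer cube root instead of rounding a floating-point one:

def perfect_cube_exact(n: int) -> bool:
    # Binary-search the integer cube root; no floating-point involved.
    if n < 0:
        return perfect_cube_exact(-n)
    lo, hi = 0, max(n, 1)
    while lo <= hi:
        mid = (lo + hi) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return False


print(perfect_cube_exact(10**18))  # True: (10**6) ** 3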
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_lowercase: Optional[Any] = get_logger(__name__)
_lowercase: Tuple = R'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class _lowercase :
"""simple docstring"""
@add_start_docstrings(__a )
def __call__(self , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _lowercase :
"""simple docstring"""
@add_start_docstrings(__a )
def __call__(self , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
@add_start_docstrings(__a )
def __call__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
for processor in self:
a = inspect.signature(processor.__call__ ).parameters
if len(__a ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
a = processor(__a , __a , __a , **__a )
else:
a = processor(__a , __a , __a )
return scores
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ ):
"""simple docstring"""
if not isinstance(__a , __a ) or not (temperature > 0):
raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
a = temperature
def __call__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = scores / self.temperature
return scores
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_ = -float("Inf" ) , lowerCamelCase_ = 1 ):
"""simple docstring"""
if not isinstance(__a , __a ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(__a , __a ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
a = top_p
a = filter_value
a = min_tokens_to_keep
def __call__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a , a = lax.top_k(__a , scores.shape[-1] )
a = jnp.full_like(__a , self.filter_value )
a = jax.nn.softmax(__a , axis=-1 ).cumsum(axis=-1 )
a = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
a = jnp.roll(__a , 1 )
score_mask |= score_mask.at[:, 0].set(__a )
# min tokens to keep
a = score_mask.at[:, : self.min_tokens_to_keep].set(__a )
a = jnp.where(__a , __a , __a )
a = jax.lax.sort_key_val(__a , __a )[-1]
return next_scores
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_ = -float("Inf" ) , lowerCamelCase_ = 1 ):
"""simple docstring"""
if not isinstance(__a , __a ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
a = max(__a , __a )
a = filter_value
def __call__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a , a = scores.shape
a = jnp.full(batch_size * vocab_size , self.filter_value )
a = min(self.top_k , scores.shape[-1] ) # Safety check
a , a = lax.top_k(__a , __a )
a = jnp.broadcast_to((jnp.arange(__a ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
a = topk_scores.flatten()
a = topk_indices.flatten() + shift
a = next_scores_flat.at[topk_indices_flat].set(__a )
a = next_scores_flat.reshape(__a , __a )
return next_scores
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ ):
"""simple docstring"""
a = bos_token_id
def __call__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = jnp.full(scores.shape , -float("inf" ) )
a = 1 - jnp.bool_(cur_len - 1 )
a = jnp.where(__a , new_scores.at[:, self.bos_token_id].set(0 ) , __a )
return scores
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = max_length
a = eos_token_id
def __call__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = jnp.full(scores.shape , -float("inf" ) )
a = 1 - jnp.bool_(cur_len - self.max_length + 1 )
a = jnp.where(__a , new_scores.at[:, self.eos_token_id].set(0 ) , __a )
return scores
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if not isinstance(__a , __a ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(__a , __a ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
a = min_length
a = eos_token_id
def __call__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
a = jnp.where(__a , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , __a )
return scores
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = list(__a )
a = begin_index
def __call__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = 1 - jnp.bool_(cur_len - self.begin_index )
a = jnp.where(__a , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , __a )
return scores
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ ):
"""simple docstring"""
a = list(__a )
def __call__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ ):
"""simple docstring"""
a = dict(__a )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
a = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
a = force_token_array.at[index].set(__a )
a = jnp.intaa(__a )
def __call__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
def _force_token(lowerCamelCase_ ):
a = scores.shape[0]
a = self.force_token_array[generation_idx]
a = jnp.ones_like(__a , dtype=scores.dtype ) * -float("inf" )
a = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
a = lax.dynamic_update_slice(__a , __a , (0, current_token) )
return new_scores
a = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(__a ) , lambda: scores , ) , )
return scores
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = generate_config.eos_token_id
a = generate_config.no_timestamps_token_id
a = generate_config.no_timestamps_token_id + 1
a = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(__a , "max_initial_timestamp_index" ):
a = generate_config.max_initial_timestamp_index
else:
a = model_config.vocab_size
if self.max_initial_timestamp_index is None:
a = model_config.vocab_size
def __call__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(lowerCamelCase_ , lowerCamelCase_ ):
a = jnp.where((cur_len - self.begin_index) >= 1 , __a , __a )
a = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __a , )
a = jnp.where((cur_len - self.begin_index) < 2 , __a , __a )
a = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , __a , __a , )
return jnp.where(
__a , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , __a , )
a = jax.vmap(__a )(__a , __a )
a = jnp.where(cur_len == self.begin_index , __a , __a )
a = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __a , )
a = self.timestamp_begin + self.max_initial_timestamp_index
a = jnp.where(
__a , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , __a , )
# if sum of probability over timestamps is above any other token, sample timestamp
a = jax.nn.log_softmax(__a , axis=-1 )
def handle_cumulative_probs(lowerCamelCase_ , lowerCamelCase_ ):
a = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
a = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , __a , )
a = jax.vmap(__a )(__a , __a )
return scores
| 227 |
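A minimal standalone sketch (hypothetical scores; assumes jax is installed) of the two most common processors above chained in the order a processor list applies them: temperature scaling first, then top-k filtering of the rescaled scores.

import jax.numpy as jnp
from jax import lax


def temperature_then_top_k(scores, temperature=0.7, top_k=2, filter_value=-float("inf")):
    # Temperature < 1 sharpens the distribution, > 1 flattens it...
    scores = scores / temperature
    # ...then every token outside the k best keeps only the filter value.
    topk_scores, topk_indices = lax.top_k(scores, top_k)
    return jnp.full_like(scores, filter_value).at[topk_indices].set(topk_scores)


scores = jnp.array([1.0, 3.0, 2.0, 0.5])
print(temperature_then_top_k(scores))  # only the two best logits stay finite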
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[int] = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='deta'
__a ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : List[str] , __a : List[str]=None , __a : Dict=9_00 , __a : str=20_48 , __a : Tuple=6 , __a : List[str]=20_48 , __a : str=8 , __a : Union[str, Any]=6 , __a : int=10_24 , __a : List[Any]=8 , __a : Dict=0.0 , __a : Tuple=True , __a : Optional[Any]="relu" , __a : Tuple=2_56 , __a : Optional[Any]=0.1 , __a : int=0.0 , __a : List[Any]=0.0 , __a : Optional[int]=0.02 , __a : str=1.0 , __a : Dict=True , __a : Dict=False , __a : Optional[int]="sine" , __a : Any=5 , __a : List[str]=4 , __a : Optional[int]=4 , __a : List[str]=True , __a : str=3_00 , __a : int=True , __a : int=True , __a : Tuple=1 , __a : Optional[int]=5 , __a : Tuple=2 , __a : Dict=1 , __a : Optional[int]=1 , __a : Any=5 , __a : Optional[int]=2 , __a : Dict=0.1 , __a : str=0.25 , **__a : Tuple , ):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_a = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(__a , __a ):
_a = backbone_config.pop("model_type" )
_a = CONFIG_MAPPING[backbone_model_type]
_a = config_class.from_dict(__a )
_a = backbone_config
_a = num_queries
_a = max_position_embeddings
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = init_xavier_std
_a = encoder_layerdrop
_a = auxiliary_loss
_a = position_embedding_type
# deformable attributes
_a = num_feature_levels
_a = encoder_n_points
_a = decoder_n_points
_a = two_stage
_a = two_stage_num_proposals
_a = with_box_refine
_a = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
_a = class_cost
_a = bbox_cost
_a = giou_cost
# Loss coefficients
_a = mask_loss_coefficient
_a = dice_loss_coefficient
_a = bbox_loss_coefficient
_a = giou_loss_coefficient
_a = eos_coefficient
_a = focal_alpha
super().__init__(is_encoder_decoder=__a , **__a )
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self : Dict ):
return self.d_model
def UpperCamelCase__ ( self : List[str] ):
_a = copy.deepcopy(self.__dict__ )
_a = self.backbone_config.to_dict()
_a = self.__class__.model_type
return output
| 63 | 0 |
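A minimal usage sketch (assumes a transformers release that ships DETA) of the config above; attribute_map routes the generic names to the DETR-style ones, and two_stage requires with_box_refine as enforced in __init__:

from transformers import DetaConfig

config = DetaConfig(two_stage=True, with_box_refine=True)
print(config.num_attention_heads)  # 8   - alias for encoder_attention_heads
print(config.hidden_size)          # 256 - alias for d_model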
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
__lowerCamelCase = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> str:
if isinstance(UpperCAmelCase__, torch.Tensor ):
return image
elif isinstance(UpperCAmelCase__, PIL.Image.Image ):
A_ = [image]
A_ = [trans(img.convert("""RGB""" ) ) for img in image]
A_ = torch.stack(UpperCAmelCase__ )
return image
class A__ ( lowerCamelCase_ ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
A_ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__a , scheduler=__a )
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
# get the original timestep using init_timestep
A_ = min(int(num_inference_steps * strength ) , __a )
A_ = max(num_inference_steps - init_timestep , 0 )
A_ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> List[str]:
'''simple docstring'''
if not isinstance(__a , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__a )}''' )
A_ = image.to(device=__a , dtype=__a )
if isinstance(__a , __a ) and len(__a ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(__a )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
A_ = init_latents.shape
A_ = randn_tensor(__a , generator=__a , device=__a , dtype=__a )
# get latents
print("""add noise to latents at timestep""" , __a )
A_ = self.scheduler.add_noise(__a , __a , __a )
A_ = init_latents
return latents
@torch.no_grad()
def __call__( self , UpperCamelCase__ = None , UpperCamelCase__ = 0.8 , UpperCamelCase__ = 1 , UpperCamelCase__ = None , UpperCamelCase__ = 0.0 , UpperCamelCase__ = 50 , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , ) -> Optional[int]:
'''simple docstring'''
self.check_inputs(__a )
# 2. Preprocess image
A_ = preprocess(__a )
# 3. set timesteps
self.scheduler.set_timesteps(__a , device=self.device )
A_ , A_ = self.get_timesteps(__a , __a , self.device )
A_ = timesteps[:1].repeat(__a )
# 4. Prepare latent variables
A_ = self.prepare_latents(__a , __a , __a , self.unet.dtype , self.device , __a )
A_ = latents
# 5. Denoising loop
for t in self.progress_bar(__a ):
# 1. predict noise model_output
A_ = self.unet(__a , __a ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
A_ = self.scheduler.step(
__a , __a , __a , eta=__a , use_clipped_model_output=__a , generator=__a , ).prev_sample
A_ = (image / 2 + 0.5).clamp(0 , 1 )
A_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A_ = self.numpy_to_pil(__a )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=__a )
| 162 |
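A small standalone sketch of the strength-to-timesteps arithmetic in get_timesteps above (stand-in schedule, no diffusers required): higher strength skips fewer leading steps, so denoising starts from a noisier latent.

def get_timesteps(scheduler_timesteps, num_inference_steps, strength):
    # Same arithmetic as the pipeline method: drop the first
    # (1 - strength) fraction of the schedule and run the rest.
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return scheduler_timesteps[t_start:], num_inference_steps - t_start


timesteps = list(range(1000, 0, -20))  # a stand-in 50-step descending schedule
kept, n_steps = get_timesteps(timesteps, num_inference_steps=50, strength=0.8)
print(n_steps)  # 40
print(kept[0])  # 800 -> denoising starts from a heavily noised latent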
'''simple docstring'''

import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    '''Save max(src_len, tgt_len) for each example to allow dynamic batching.'''
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
| 63 | 0 |
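A small sketch of the per-batch length computation used in get_lens above: .ne(pad) marks the real tokens and summing over the sequence dimension yields per-example lengths.

import torch

pad = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0],
                          [5, 0, 0, 0, 0]])
print(input_ids.ne(pad).sum(1).tolist())  # [3, 1]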
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase_ ( lowerCamelCase_ , lowerCamelCase_ ):
@register_to_config
def __init__( self , *,
__UpperCamelCase = 4 , __UpperCamelCase = 7_6_8 , __UpperCamelCase , __UpperCamelCase , ):
"""simple docstring"""
super().__init__()
UpperCamelCase_ = nn.Parameter(torch.zeros(__a ) )
# parameters for additional clip time embeddings
UpperCamelCase_ = nn.Linear(__a , __a )
UpperCamelCase_ = nn.Linear(__a , __a )
# parameters for encoder hidden states
UpperCamelCase_ = clip_extra_context_tokens
UpperCamelCase_ = nn.Linear(
__a , self.clip_extra_context_tokens * cross_attention_dim )
UpperCamelCase_ = nn.Linear(__a , __a )
UpperCamelCase_ = nn.LayerNorm(__a )
def lowerCamelCase_ ( self , *, __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
UpperCamelCase_ = image_embeddings.shape[0]
UpperCamelCase_ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
UpperCamelCase_ = classifier_free_guidance_embeddings.expand(
__a , -1 )
UpperCamelCase_ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
UpperCamelCase_ = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
UpperCamelCase_ = self.embedding_proj(__a )
UpperCamelCase_ = self.clip_image_embeddings_project_to_time_embeddings(__a )
UpperCamelCase_ = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
UpperCamelCase_ = self.clip_extra_context_tokens_proj(__a )
UpperCamelCase_ = clip_extra_context_tokens.reshape(__a , -1 , self.clip_extra_context_tokens )
UpperCamelCase_ = clip_extra_context_tokens.permute(0 , 2 , 1 )
UpperCamelCase_ = self.encoder_hidden_states_proj(__a )
UpperCamelCase_ = self.text_encoder_hidden_states_norm(__a )
UpperCamelCase_ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 122 |
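A shape-level sketch (hypothetical dimensions) of the extra-context-token projection performed above: one linear layer maps each CLIP image embedding to clip_extra_context_tokens vectors of the cross-attention width.

import torch

batch, clip_dim, xattn_dim, n_extra = 2, 768, 1280, 4
proj = torch.nn.Linear(clip_dim, n_extra * xattn_dim)
image_embeds = torch.randn(batch, clip_dim)
extra_tokens = proj(image_embeds).reshape(batch, -1, n_extra).permute(0, 2, 1)
print(extra_tokens.shape)  # torch.Size([2, 4, 1280])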
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : str ):
_a = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : List[str] ):
_a = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : List[str] ):
_a = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : List[str] ):
_a = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : str ):
_a = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Any ):
_a = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Any ):
# pass variant but use the non-variant filenames
_a = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a = "fp16"
self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Dict ):
_a = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : List[str] ):
# pass variant but use the non-variant filenames
_a = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Optional[int] ):
_a = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_a = "fp16"
self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
| 63 | 0 |
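The rule these tests encode can be restated compactly. The sketch below is a simplified reimplementation for illustration, not the library function: every PyTorch ".bin" weight needs a ".safetensors" counterpart, where transformers-style "pytorch_model*.bin" pairs with "model*.safetensors"; the variant is implicit in the filenames themselves.

import os


def safetensors_compatible(filenames):
    safetensors = {f for f in filenames if f.endswith(".safetensors")}
    for name in (f for f in filenames if f.endswith(".bin")):
        folder, base = os.path.split(name)
        if base.startswith("pytorch_model"):
            base = base.replace("pytorch_model", "model", 1)
        expected = os.path.join(folder, base.replace(".bin", ".safetensors"))
        if expected not in safetensors:
            return False
    return True


print(safetensors_compatible(["unet/diffusion_pytorch_model.bin"]))  # False
print(safetensors_compatible(
    ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]))  # True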
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Dict = None
UpperCAmelCase__ : int = None
class snake_case_ (lowerCamelCase_ ):
def __init__( self :Tuple ,__snake_case :Tuple=1 ,__snake_case :List[Any]=0 ,__snake_case :Any=2 ,__snake_case :Any=5_12 ,__snake_case :Optional[Any]="cls" ,__snake_case :List[Any]=False ,__snake_case :Tuple=True ,**__snake_case :Tuple ,) -> int:
super().__init__(pad_token_id=__a ,bos_token_id=__a ,eos_token_id=__a ,**__a )
a__ = project_dim
a__ = pooler_fn
a__ = learn_encoder
a__ = use_attention_mask
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = [R'''pooler''', R'''logit_scale''']
UpperCAmelCase__ : List[Any] = [R'''position_ids''', R'''predictions.decoder.bias''']
UpperCAmelCase__ : Any = '''roberta'''
UpperCAmelCase__ : Optional[int] = RobertaSeriesConfig
def __init__( self :Optional[int] ,__snake_case :List[Any] ) -> List[Any]:
super().__init__(__a )
a__ = XLMRobertaModel(__a )
a__ = nn.Linear(config.hidden_size ,config.project_dim )
a__ = getattr(__a ,'has_pre_transformation' ,__a )
if self.has_pre_transformation:
a__ = nn.Linear(config.hidden_size ,config.project_dim )
a__ = nn.LayerNorm(config.hidden_size ,eps=config.layer_norm_eps )
self.post_init()
def lowerCamelCase__( self :Any ,__snake_case :Optional[torch.Tensor] = None ,__snake_case :Optional[torch.Tensor] = None ,__snake_case :Optional[torch.Tensor] = None ,__snake_case :Optional[torch.Tensor] = None ,__snake_case :Optional[torch.Tensor] = None ,__snake_case :Optional[torch.Tensor] = None ,__snake_case :Optional[torch.Tensor] = None ,__snake_case :Optional[torch.Tensor] = None ,__snake_case :Optional[bool] = None ,__snake_case :Optional[bool] = None ,__snake_case :Optional[bool] = None ,) -> List[str]:
a__ = return_dict if return_dict is not None else self.config.use_return_dict
a__ = self.base_model(
input_ids=__a ,attention_mask=__a ,token_type_ids=__a ,position_ids=__a ,head_mask=__a ,inputs_embeds=__a ,encoder_hidden_states=__a ,encoder_attention_mask=__a ,output_attentions=__a ,output_hidden_states=True if self.has_pre_transformation else output_hidden_states ,return_dict=__a ,)
if self.has_pre_transformation:
a__ = outputs['hidden_states'][-2]
a__ = self.pre_LN(__a )
a__ = self.transformation_pre(__a )
return TransformationModelOutput(
projection_state=__a ,last_hidden_state=outputs.last_hidden_state ,hidden_states=outputs.hidden_states ,attentions=outputs.attentions ,)
else:
a__ = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=__a ,last_hidden_state=outputs.last_hidden_state ,hidden_states=outputs.hidden_states ,attentions=outputs.attentions ,)
| 240 |
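A shape-level sketch (hypothetical dimensions) of the final transformation above: a linear layer projects the encoder's last hidden state from hidden_size down to project_dim, giving the projection_state returned by the model.

import torch

hidden = torch.randn(2, 16, 768)           # (batch, seq_len, hidden_size)
transformation = torch.nn.Linear(768, 64)  # hidden_size -> project_dim
projection_state = transformation(hidden)
print(projection_state.shape)              # torch.Size([2, 16, 64])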
'''simple docstring'''


def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit uppercase hexadecimal representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 63 | 0 |
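A quick round-trip check for the two helpers above:

encoded = base16_encode(b"Hello World!")
print(encoded)                 # 48656C6C6F20576F726C6421
print(base16_decode(encoded))  # b'Hello World!'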
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
for attribute in key.split(""".""" ):
_a : Any = getattr(UpperCamelCase__ , UpperCamelCase__ )
if weight_type is not None:
_a : str = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
else:
_a : List[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_a : List[str] = value
elif weight_type == "weight_g":
_a : Optional[Any] = value
elif weight_type == "weight_v":
_a : int = value
elif weight_type == "bias":
_a : str = value
else:
_a : List[Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[Any] = []
_a : Tuple = fairseq_model.state_dict()
_a : List[str] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_a : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == """group""" , )
_a : str = True
else:
for key, mapped_key in MAPPING.items():
_a : Optional[int] = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_a : int = True
if "*" in mapped_key:
_a : int = name.split(UpperCamelCase__ )[0].split(""".""" )[-2]
_a : List[str] = mapped_key.replace("""*""" , UpperCamelCase__ )
if "weight_g" in name:
_a : Optional[int] = """weight_g"""
elif "weight_v" in name:
_a : str = """weight_v"""
elif "weight" in name:
_a : Any = """weight"""
elif "bias" in name:
_a : List[str] = """bias"""
else:
_a : List[str] = None
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Tuple = full_name.split("""conv_layers.""" )[-1]
_a : Any = name.split(""".""" )
_a : Any = int(items[0] )
_a : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_a : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_a : List[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_a : int = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_a : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Any = SEWConfig()
if is_finetuned:
_a : List[Any] = model.wav_encoder.wav_model.cfg
else:
_a : Tuple = model.cfg
_a : Dict = fs_config.conv_bias
_a : Any = eval(fs_config.conv_feature_layers )
_a : Dict = [x[0] for x in conv_layers]
_a : Optional[Any] = [x[1] for x in conv_layers]
_a : Optional[int] = [x[2] for x in conv_layers]
_a : Optional[int] = """gelu"""
_a : List[Any] = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
_a : Tuple = 0.0
_a : Optional[int] = fs_config.activation_fn.name
_a : str = fs_config.encoder_embed_dim
_a : int = 0.02
_a : Optional[Any] = fs_config.encoder_ffn_embed_dim
_a : List[Any] = 1e-5
_a : Union[str, Any] = fs_config.encoder_layerdrop
_a : List[Any] = fs_config.encoder_attention_heads
_a : Optional[int] = fs_config.conv_pos_groups
_a : int = fs_config.conv_pos
_a : Union[str, Any] = len(UpperCamelCase__ )
_a : str = fs_config.encoder_layers
_a : List[Any] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_a : Any = model.cfg
_a : Optional[Any] = fs_config.final_dropout
_a : int = fs_config.layerdrop
_a : Dict = fs_config.activation_dropout
_a : str = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_a : Optional[Any] = fs_config.attention_dropout
_a : Tuple = fs_config.dropout_input
_a : Union[str, Any] = fs_config.dropout
_a : Dict = fs_config.mask_channel_length
_a : List[Any] = fs_config.mask_channel_prob
_a : Union[str, Any] = fs_config.mask_length
_a : str = fs_config.mask_prob
_a : int = """Wav2Vec2FeatureExtractor"""
_a : Dict = """Wav2Vec2CTCTokenizer"""
return config
@torch.no_grad()
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True ):
'''simple docstring'''
if is_finetuned:
_a , _a , _a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
_a , _a , _a : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_a : str = SEWConfig.from_pretrained(UpperCamelCase__ )
else:
_a : str = convert_config(model[0] , UpperCamelCase__ )
_a : Tuple = model[0].eval()
_a : Tuple = True if config.feat_extract_norm == """layer""" else False
_a : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , )
if is_finetuned:
if dict_path:
_a : Dict = Dictionary.load(UpperCamelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_a : Optional[int] = target_dict.pad_index
_a : List[Any] = target_dict.bos_index
_a : Any = target_dict.pad_index
_a : List[Any] = target_dict.bos_index
_a : List[Any] = target_dict.eos_index
_a : Tuple = len(target_dict.symbols )
_a : List[Any] = os.path.join(UpperCamelCase__ , """vocab.json""" )
if not os.path.isdir(UpperCamelCase__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(UpperCamelCase__ ) )
return
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , UpperCamelCase__ )
_a : Dict = WavaVecaCTCTokenizer(
UpperCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=UpperCamelCase__ , )
_a : Union[str, Any] = WavaVecaProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
_a : int = SEWForCTC(UpperCamelCase__ )
else:
_a : Optional[int] = SEWModel(UpperCamelCase__ )
feature_extractor.save_pretrained(UpperCamelCase__ )
recursively_load_weights(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
hf_model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
_snake_case = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 294 |
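A minimal sketch of the wildcard key renaming performed in the weight-loading loop above (one real entry from the mapping table; the helper name is illustrative): the layer index parsed out of the fairseq parameter name replaces the "*" in the Hugging Face key, while weight/bias suffixes are handled separately.

MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}


def map_key(name):
    for key, mapped_key in MAPPING.items():
        if key in name:
            layer_index = name.split(key)[0].split(".")[-2]
            return mapped_key.replace("*", layer_index)
    return None


print(map_key("encoder.layers.3.self_attn.k_proj.weight"))
# encoder.layers.3.attention.k_proj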
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : Dict ) -> str:
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : int , lowercase : Tuple , lowercase : Optional[int] , lowercase : int=True ) -> Any:
model.train()
_a = model(lowercase )
_a = F.mse_loss(lowercase , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowercase )
def _lowerCamelCase ( lowercase : int , lowercase : Tuple=False ) -> List[str]:
set_seed(42 )
_a = RegressionModel()
_a = deepcopy(lowercase )
_a = RegressionDataset(length=80 )
_a = DataLoader(lowercase , batch_size=16 )
model.to(accelerator.device )
if sched:
_a = AdamW(params=model.parameters() , lr=1E-3 )
_a = AdamW(params=ddp_model.parameters() , lr=1E-3 )
_a = LambdaLR(lowercase , lr_lambda=lambda epoch : epoch**0.65 )
_a = LambdaLR(lowercase , lr_lambda=lambda epoch : epoch**0.65 )
# Make a copy of `model`
if sched:
_a , _a , _a , _a = accelerator.prepare(lowercase , lowercase , lowercase , lowercase )
else:
_a , _a = accelerator.prepare(lowercase , lowercase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[int]:
# Test when on a single CPU or GPU that the context manager does nothing
_a , _a , _a = get_training_setup(lowercase )
# Use a single batch
_a , _a = next(iter(lowercase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
else:
# Sync grads
step_model(lowercase , lowercase , lowercase , lowercase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowercase , lowercase , lowercase , lowercase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_a = ddp_input[torch.randperm(len(lowercase ) )]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
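# In user code the same behavior is reached through `accumulate` alone; a
# minimal sketch (names illustrative):
#
#   accelerator = Accelerator(gradient_accumulation_steps=2)
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       with accelerator.accumulate(model):
#           loss = loss_fn(model(batch["x"]), batch["y"])
#           accelerator.backward(loss)
#           optimizer.step()
#           optimizer.zero_grad()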
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
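# The scheduler bookkeeping above exists because `accelerator.prepare` wraps the
# scheduler so that it steps once per process unless `split_batches=True`; the
# unwrapped reference scheduler therefore has to be stepped `num_processes`
# times to stay aligned with the prepared one.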
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 63 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
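# Accessing an attribute on this module resolves the heavy backend imports
# lazily; a rough usage sketch (illustrative):
#
#   from transformers.models import xlm
#   config = xlm.XLMConfig()        # cheap: only the config submodule loads
#   model = xlm.XLMModel(config)    # first access triggers the torch import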
| 68 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16,
                 decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1,
                 attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02,
                 decoder_layerdrop=0.0, use_cache=True, scale_embedding=False,
                 use_learned_position_embeddings=True, layernorm_embedding=True,
                 pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
                         decoder_start_token_id=decoder_start_token_id, **kwargs)
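# Minimal usage sketch (values illustrative): `attribute_map` lets generic code
# read decoder-specific fields under the common config names:
#
#   config = TrOCRConfig(d_model=512, decoder_layers=6)
#   assert config.hidden_size == 512        # resolved via attribute_map
#   assert config.num_hidden_layers == 6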
| 63 | 0 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    return -1
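# Example (hypothetical values): both variants return the index of `target` in a
# sorted list, falling back to linear search on ranges smaller than `precision`:
#
#   >>> ite_ternary_search([1, 3, 5, 7, 9], 7)
#   3
#   >>> rec_ternary_search(0, 4, [1, 3, 5, 7, 9], 2)
#   -1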
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at positions: {result_ite}")
        print(f"Recursive search: {target} found at positions: {result_rec}")
else:
print('''Not found''')
| 157 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
#   SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
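# Illustrative before/after of what the sorting below does to a mapping body:
#
#       ("roberta", "RobertaConfig"),      becomes      ("bert", "BertConfig"),
#       ("bert", "BertConfig"),                         ("roberta", "RobertaConfig"),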
def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()
    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1
    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]
    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 63 | 0 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
    blip_2,
bloom,
bridgetower,
    byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
    convnextv2,
cpm,
cpmant,
ctrl,
cvt,
    data2vec,
deberta,
    deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
    gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
    gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
    longt5,
luke,
lxmert,
    m2m_100,
marian,
markuplm,
    mask2former,
maskformer,
mbart,
    mbart50,
mega,
megatron_bert,
    megatron_gpt2,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
    mobilevitv2,
mpnet,
mra,
    mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
    pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
    speech_to_text_2,
    speecht5,
splinter,
squeezebert,
swiftformer,
swin,
    swin2sr,
    swinv2,
switch_transformers,
    t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
    umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 298 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ : int = logging.get_logger(__name__)
lowerCAmelCase_ : Tuple = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='big_bird'
def __init__( self : Optional[int] , __a : Dict=5_03_58 , __a : str=7_68 , __a : List[Any]=12 , __a : List[str]=12 , __a : Union[str, Any]=30_72 , __a : str="gelu_new" , __a : Dict=0.1 , __a : Union[str, Any]=0.1 , __a : Any=40_96 , __a : int=2 , __a : Tuple=0.02 , __a : List[Any]=1e-1_2 , __a : int=True , __a : List[str]=0 , __a : Tuple=1 , __a : Optional[Any]=2 , __a : Tuple=66 , __a : str="block_sparse" , __a : Tuple=True , __a : Optional[int]=False , __a : str=64 , __a : Tuple=3 , __a : Any=None , **__a : Dict , ):
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , sep_token_id=__a , **__a , )
_a = vocab_size
_a = max_position_embeddings
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = initializer_range
_a = type_vocab_size
_a = layer_norm_eps
_a = use_cache
_a = rescale_embeddings
_a = attention_type
_a = use_bias
_a = block_size
_a = num_random_blocks
_a = classifier_dropout
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
@property
def UpperCamelCase__ ( self : Optional[int] ):
if self.task == "multiple-choice":
_a = {0: "batch", 1: "choice", 2: "sequence"}
else:
_a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 63 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
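# FRAMEWORK picks the tensor type for `return_tensors=` in the batching tests
# below; e.g. (illustrative) `tok(["hello"], return_tensors=FRAMEWORK)` yields
# torch tensors when torch is installed, tf tensors otherwise.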
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
    def test_tokenizer_integration(self):
# fmt: off
__SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__SCREAMING_SNAKE_CASE,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 9 |
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
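# Shape sketch (illustrative sizes): with clip_embeddings_dim=768,
# cross_attention_dim=1280 and clip_extra_context_tokens=4, image embeddings of
# shape (B, 768) are projected to (B, 5120), reshaped to (B, 1280, 4), and
# permuted to (B, 4, 1280); these four extra tokens are then concatenated in
# front of the projected text-encoder states along the sequence dimension.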
| 63 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
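# Placeholders of this shape let the top-level package import cleanly when torch
# is absent: touching any torch-backed class raises an informative error via
# `requires_backends`. Rough failure-mode sketch (class name illustrative):
#
#   model = SomeTorchBackedModel()   # -> raises, asking to `pip install torch`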
class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :str = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :str = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Any = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :List[Any] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Optional[int] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :str = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Tuple = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :str = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :List[Any] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Any = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
def __A(*args, **kwargs):
    requires_backends(__A, ["torch"])


def __A(*args, **kwargs):
    requires_backends(__A, ["torch"])


def __A(*args, **kwargs):
    requires_backends(__A, ["torch"])


def __A(*args, **kwargs):
    requires_backends(__A, ["torch"])


def __A(*args, **kwargs):
    requires_backends(__A, ["torch"])


def __A(*args, **kwargs):
    requires_backends(__A, ["torch"])


def __A(*args, **kwargs):
    requires_backends(__A, ["torch"])
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :List[Any] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :List[str] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Optional[int] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Tuple = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Tuple = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Any = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Optional[int] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :int = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Dict = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :List[Any] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :int = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Any = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Optional[Any] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :List[Any] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Tuple = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Any = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Tuple = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Dict = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Optional[Any] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Any = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Tuple = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Dict = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Optional[int] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Tuple = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :int = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Optional[int] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Any = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Tuple = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :int = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Optional[int] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Any = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Tuple = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Any = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Optional[int] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Any = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
class _lowerCAmelCase ( metaclass=lowerCamelCase_ ):
"""simple docstring"""
__magic_name__ :Optional[int] = ["""torch"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch'] )
| 293 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum(np.prod(p.size()) for p in model_parameters)
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric, using a metric-specific filename template."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=3, every_n_epochs=1
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True
    )
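# Minimal usage sketch (the `args` namespace is hypothetical, not part of this module):
# checkpoint_cb = get_checkpoint_callback(args.output_dir, metric="rouge2")
# early_stop_cb = get_early_stopping_callback(metric="rouge2", patience=3)
# trainer = pl.Trainer(callbacks=[checkpoint_cb, early_stop_cb, Seq2SeqLoggingCallback()])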
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 63 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the JSON config
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
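# Example invocation (script and file names are illustrative):
# python convert_mobilebert_tf_checkpoint.py --tf_checkpoint_path ./mobilebert_ckpt \
#   --mobilebert_config_file ./config.json --pytorch_dump_path ./pytorch_model.bin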
| 314 |
'''simple docstring'''
import math
class SelfOrganizingMap:
    """A two-cluster self-organizing map trained by competitive learning."""

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        # Compute the squared Euclidean distance from the sample to each weight vector
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        return 0 if d0 > d1 else 1

    def update(self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float) -> list[list[int | float]]:
        # Pull the winning weight vector toward the sample by the learning rate alpha
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
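# Worked example of the update rule with alpha = 0.5: a weight of 0.2 pulled toward a
# sample value of 1.0 becomes 0.2 + 0.5 * (1.0 - 0.2) = 0.6.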
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
| 63 | 0 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
    def _download_and_prepare(self, dl_manager):
"""simple docstring"""
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        """simple docstring"""
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
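# Minimal usage sketch, mirroring the example shown in _KWARGS_DESCRIPTION above:
# meteor = datasets.load_metric("meteor")
# results = meteor.compute(predictions=["hello there"], references=["hello there"])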
| 227 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
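# Minimal usage sketch (checkpoint name is illustrative; `image` is assumed to be a PIL image):
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")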
| 63 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    # Build the fixed test tree: 1 has children 2 and 3; 2 has children 4 and 5
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0
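# For the tree wired as in make_tree() above (2/3 under 1, 4/5 under 2):
# preorder -> [1, 2, 4, 5, 3], inorder -> [4, 2, 5, 1, 3], postorder -> [4, 5, 2, 3, 1], height -> 3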
def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: alternate left-to-right and right-to-left per level."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")

    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 162 |
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as fraction strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
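# e.g. harmonic_series("5") -> ['1', '1/2', '1/3', '1/4', '1/5']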
if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
| 63 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        """simple docstring"""
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("""add_prefix_space""", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("""type"""))
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""])
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""])

            changes_to_apply = False
            if state.get("""add_prefix_space""", add_prefix_space) != add_prefix_space:
                state["""add_prefix_space"""] = add_prefix_space
                changes_to_apply = True
            if state.get("""trim_offsets""", trim_offsets) != trim_offsets:
                state["""trim_offsets"""] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("""type"""))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self):
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        """simple docstring"""
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                """to use it with pretokenized inputs.""")

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                """to use it with pretokenized inputs.""")

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
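# Minimal usage sketch (checkpoint name taken from the pretrained map above):
# tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
# ids = tokenizer("Hello world")["input_ids"]  # BOS (<s>) and EOS (</s>) ids are added automatically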
| 122 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!", )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.", )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.", )
    parser.add_argument(
        "--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.", )
    parser.add_argument(
        "--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local", )
    parser.add_argument(
        "--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.", )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.", )
    parser.add_argument(
        "--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)", )
    parser.add_argument(
        "--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.", )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.", )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.", )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py", )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.", )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project)
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local.")

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        num_samples += int(sample_count)

    return num_samples
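# e.g. a shard named "train-0-2048.tfrecord" contributes 2048 samples to the total,
# matching the naming scheme the regex above assumes.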
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf")

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )

    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))

    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
| 63 | 0 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels, )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {'input_ids': input_ids_2, 'mems': mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {'input_ids': input_ids_1, 'labels': lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {'input_ids': input_ids_2, 'mems': mems_1, 'labels': lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': TFTransfoXLModel,
            'text-classification': TFTransfoXLForSequenceClassification,
            'text-generation': TFTransfoXLLMHeadModel,
            'zero-shot': TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.')
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip('Skip test until #12651 is resolved.')
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=2_00, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 240 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    """simple docstring"""

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv3ImageProcessor'
    tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.")

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")

        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
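# Minimal usage sketch (checkpoint name is illustrative; assumes apply_ocr=True in the image processor):
# processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
# encoding = processor(image, return_tensors="pt")  # runs OCR, tokenizes, and bundles pixel values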
| 63 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger()
def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    print(f"""Converting {name}...""")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("""levit_128s""", pretrained=True)
            else:
                from_model = timm.create_model("""levit_128""", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("""levit_192""", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("""levit_256""", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("""levit_384""", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1_0_0_0
    expected_shape = (1, num_labels)

    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        """levit-128S""": 128,
        """levit-128""": 128,
        """levit-192""": 192,
        """levit-256""": 256,
        """levit-384""": 384,
    }

    names_to_config = {
        """levit-128S""": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        """levit-128""": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        """levit-192""": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        """levit-256""": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        """levit-384""": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
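# Example invocation (script name is illustrative; flags are defined above):
# python convert_levit_timm_to_pytorch.py --model_name levit-128S --pytorch_dump_folder_path levit-dump-folder/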
| 294 |
'''simple docstring'''
from ....utils import logging
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        # Copy all attributes from the wrapped text config, then add the multimodal ones
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
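# Minimal usage sketch (BertConfig is illustrative; any transformers config with a __dict__ works):
# text_config = BertConfig()
# multimodal_config = __SCREAMING_SNAKE_CASE(text_config, num_labels=2, modal_hidden_size=2048)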
| 63 | 0 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."], )
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = AlbertTokenizer(__a )
A__ = tokenizer.encode("sequence builders" )
A__ = tokenizer.encode("multi-sequence build" )
A__ = tokenizer.build_inputs_with_special_tokens(__a )
A__ = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
| 68 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int = 100 ) -> int:
_a = 0
_a = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 63 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class _snake_case ( lowerCamelCase_ ):
lowerCamelCase__: List[str] = "vit_mae"
def __init__( self: Union[str, Any] , __lowerCamelCase: Tuple=7_68 , __lowerCamelCase: str=12 , __lowerCamelCase: List[str]=12 , __lowerCamelCase: List[str]=30_72 , __lowerCamelCase: Optional[Any]="gelu" , __lowerCamelCase: List[str]=0.0 , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: Optional[int]=1e-12 , __lowerCamelCase: List[str]=2_24 , __lowerCamelCase: List[Any]=16 , __lowerCamelCase: Tuple=3 , __lowerCamelCase: Dict=True , __lowerCamelCase: List[str]=16 , __lowerCamelCase: List[str]=5_12 , __lowerCamelCase: Optional[int]=8 , __lowerCamelCase: Dict=20_48 , __lowerCamelCase: Any=0.75 , __lowerCamelCase: int=False , **__lowerCamelCase: List[Any] , ) -> Optional[Any]:
super().__init__(**__a )
__UpperCAmelCase : List[str] = hidden_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Tuple = attention_probs_dropout_prob
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : Optional[Any] = image_size
__UpperCAmelCase : Optional[int] = patch_size
__UpperCAmelCase : str = num_channels
__UpperCAmelCase : int = qkv_bias
__UpperCAmelCase : Any = decoder_num_attention_heads
__UpperCAmelCase : int = decoder_hidden_size
__UpperCAmelCase : str = decoder_num_hidden_layers
__UpperCAmelCase : str = decoder_intermediate_size
__UpperCAmelCase : str = mask_ratio
__UpperCAmelCase : List[str] = norm_pix_loss
| 157 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int ) -> bool:
if num < 0:
return False
_a = num
_a = 0
while num > 0:
_a = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
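Note that doctest.testmod() above has nothing to run, since the docstring is only a placeholder; a few direct sanity checks of the digit-reversal routine defined above:

assert _lowerCamelCase(121)       # 121 reversed is 121
assert not _lowerCamelCase(123)   # 123 reversed is 321
assert not _lowerCamelCase(-121)  # negatives are rejected up front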
| 63 | 0 |
'''simple docstring'''
import os
def __lowerCAmelCase ( ):
with open(os.path.dirname(snake_case__ ) + "/grid.txt" ) as f:
__UpperCamelCase : Tuple = [] # noqa: E741
for _ in range(20 ):
l.append([int(snake_case__ ) for x in f.readline().split()] )
__UpperCamelCase : Dict = 0
# right
for i in range(20 ):
for j in range(17 ):
__UpperCamelCase : Any = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
__UpperCamelCase : Dict = temp
# down
for i in range(17 ):
for j in range(20 ):
__UpperCamelCase : Tuple = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
__UpperCamelCase : List[str] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
__UpperCamelCase : str = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
__UpperCamelCase : int = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
__UpperCamelCase : Any = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
__UpperCamelCase : List[str] = temp
return maximum
if __name__ == "__main__":
print(solution())
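The four scans above can be unified with direction vectors; a self-contained sketch over an arbitrary rectangular grid (the function name and the small test grid are made up for illustration):

# Right, down, and both diagonals expressed as (di, dj) direction vectors.
def max_product_of_four(grid):
    rows, cols = len(grid), len(grid[0])
    best = 0
    for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):
        for i in range(rows):
            for j in range(cols):
                if 0 <= i + 3 * di < rows and 0 <= j + 3 * dj < cols:
                    product = 1
                    for step in range(4):
                        product *= grid[i + step * di][j + step * dj]
                    best = max(best, product)
    return best

# The row of 3s gives 3**4 = 81, the largest product in this grid.
assert max_product_of_four([[1, 2, 3, 4], [1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]) == 81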
| 298 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowerCAmelCase_ : int = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[int] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : List[str] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
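The _LazyModule indirection above defers heavy imports until first use. A minimal sketch of the underlying idea, module-level __getattr__ from PEP 562, meant to live in a package's __init__.py; this is an illustration, not the actual transformers implementation:

# Defer an import until first attribute access (PEP 562).
import importlib

_lazy_targets = {'GPTNeoXConfig': 'configuration_gpt_neox'}  # attribute -> submodule

def __getattr__(name):
    if name in _lazy_targets:
        # Import the submodule only now, then pull the attribute off it.
        module = importlib.import_module('.' + _lazy_targets[name], __name__)
        return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')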
| 63 | 0 |
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _UpperCamelCase ( lowercase__ , lowercase__ ):
assert isinstance(lowercase__ , lowercase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / '''cache'''
__SCREAMING_SNAKE_CASE : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__SCREAMING_SNAKE_CASE : Optional[int] = SqlDatasetReader(
'''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=lowercase__ , keep_in_memory=lowercase__ ).read()
_check_sql_dataset(lowercase__ , lowercase__ )
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / '''cache'''
__SCREAMING_SNAKE_CASE : List[str] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__SCREAMING_SNAKE_CASE : Union[str, Any] = features.copy() if features else default_expected_features
__SCREAMING_SNAKE_CASE : Tuple = (
Features({feature: Value(lowercase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__SCREAMING_SNAKE_CASE : List[str] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=lowercase__ , cache_dir=lowercase__ ).read()
_check_sql_dataset(lowercase__ , lowercase__ )
def _UpperCamelCase ( lowercase__ ):
with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con:
__SCREAMING_SNAKE_CASE : Optional[Any] = con.cursor()
cur.execute('''SELECT * FROM dataset''' )
for row in cur:
yield row
@require_sqlalchemy
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / '''cache'''
__SCREAMING_SNAKE_CASE : Tuple = os.path.join(lowercase__ , '''tmp.sql''' )
__SCREAMING_SNAKE_CASE : Dict = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=lowercase__ ).read()
SqlDatasetWriter(lowercase__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write()
__SCREAMING_SNAKE_CASE : str = iter_sql_file(lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = iter_sql_file(lowercase__ )
for rowa, rowa in zip(lowercase__ , lowercase__ ):
assert rowa == rowa
@require_sqlalchemy
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = tmp_path / '''cache'''
__SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(lowercase__ , '''tmp.sql''' )
__SCREAMING_SNAKE_CASE : str = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=lowercase__ ).read()
SqlDatasetWriter(lowercase__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write()
__SCREAMING_SNAKE_CASE : Dict = iter_sql_file(lowercase__ )
__SCREAMING_SNAKE_CASE : Tuple = iter_sql_file(lowercase__ )
for rowa, rowa in zip(lowercase__ , lowercase__ ):
assert rowa == rowa
@require_sqlalchemy
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Any = tmp_path / '''cache'''
__SCREAMING_SNAKE_CASE : List[Any] = os.path.join(lowercase__ , '''tmp.sql''' )
__SCREAMING_SNAKE_CASE : List[Any] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=lowercase__ ).read()
with pytest.raises(lowercase__ ):
SqlDatasetWriter(lowercase__ , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
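These tests ultimately round-trip rows through SQLite; the bare sqlite3 version of that round trip looks like this (in-memory database and values are made up for illustration):

# Minimal sqlite3 round trip: create, insert, read back.
import sqlite3

with sqlite3.connect(':memory:') as con:
    con.execute('CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)')
    con.executemany('INSERT INTO dataset VALUES (?, ?, ?)', [('a', 1, 1.0), ('b', 2, 2.0)])
    rows = list(con.execute('SELECT * FROM dataset'))

assert rows == [('a', 1, 1.0), ('b', 2, 2.0)]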
| 9 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase_ : Any = get_tests_dir('fixtures')
lowerCAmelCase_ : Union[str, Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowerCAmelCase_ : Dict = get_tests_dir('fixtures/dummy-config.json')
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] ):
_a = 0
def UpperCamelCase__ ( self : str ):
_a = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Tuple ):
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
_a = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
_a = AutoFeatureExtractor.from_pretrained(__a ).to_dict()
config_dict.pop("feature_extractor_type" )
_a = WavaVecaFeatureExtractor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
_a = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Tuple ):
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Union[str, Any] ):
with self.assertRaisesRegex(
__a , "bert-base is not a local folder and is not a valid model identifier" ):
_a = AutoFeatureExtractor.from_pretrained("bert-base" )
def UpperCamelCase__ ( self : Optional[Any] ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_a = AutoFeatureExtractor.from_pretrained(__a , revision="aaaaaa" )
def UpperCamelCase__ ( self : List[Any] ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
_a = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def UpperCamelCase__ ( self : List[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def UpperCamelCase__ ( self : Any ):
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoFeatureExtractor.register(__a , __a )
# Now that the config is registered, it can be used as any other config with the auto-API
_a = CustomFeatureExtractor.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self : Tuple ):
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =True
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# If remote code is not set, the default is to use local
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(__a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
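The register/lookup behavior exercised above boils down to a mapping from config classes to extractor classes that rejects duplicate registration. A toy sketch of that pattern (all class names here are placeholders, not the transformers API):

# Dict-based registry: one extractor class per config class.
class TinyRegistry:
    def __init__(self):
        self._mapping = {}

    def register(self, config_cls, extractor_cls):
        if config_cls in self._mapping:
            raise ValueError(f'{config_cls.__name__} is already registered')
        self._mapping[config_cls] = extractor_cls

    def lookup(self, config):
        return self._mapping[type(config)]

class DemoConfig: ...
class DemoExtractor: ...

registry = TinyRegistry()
registry.register(DemoConfig, DemoExtractor)
assert registry.lookup(DemoConfig()) is DemoExtractor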
| 63 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=7 , __UpperCAmelCase=6 , __UpperCAmelCase=1_7 , __UpperCAmelCase=2_3 , __UpperCAmelCase=1_1 , __UpperCAmelCase=True , ):
'''simple docstring'''
lowerCAmelCase__ :Dict = parent
lowerCAmelCase__ :Optional[Any] = batch_size
lowerCAmelCase__ :int = seq_length
lowerCAmelCase__ :str = act_dim
lowerCAmelCase__ :Optional[Any] = state_dim
lowerCAmelCase__ :List[str] = hidden_size
lowerCAmelCase__ :str = max_length
lowerCAmelCase__ :Optional[int] = is_training
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowerCAmelCase__ :List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowerCAmelCase__ :Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1) )
lowerCAmelCase__ :List[str] = floats_tensor((self.batch_size, self.seq_length, 1) )
lowerCAmelCase__ :List[Any] = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 )
lowerCAmelCase__ :List[str] = random_attention_mask((self.batch_size, self.seq_length) )
lowerCAmelCase__ :Tuple = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def snake_case ( self ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = DecisionTransformerModel(config=__a )
model.to(__a )
model.eval()
lowerCAmelCase__ :int = model(__a , __a , __a , __a , __a , __a )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length * 3, since there are 3 modalities: states, returns and actions
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = self.prepare_config_and_inputs()
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ :Optional[Any] = {
'states': states,
'actions': actions,
'rewards': rewards,
'returns_to_go': returns_to_go,
'timesteps': timesteps,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :List[str] = (DecisionTransformerModel,) if is_torch_available() else ()
__magic_name__ :Tuple = ()
__magic_name__ :Tuple = {"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {}
    # Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
__magic_name__ :Tuple = False
    # Ignore failing tests from ModelTesterMixin, as the model does not implement these features
__magic_name__ :Union[str, Any] = False
__magic_name__ :Any = False
__magic_name__ :Optional[Any] = False
__magic_name__ :Optional[Any] = False
__magic_name__ :List[Any] = False
__magic_name__ :Tuple = False
__magic_name__ :Any = False
__magic_name__ :str = False
__magic_name__ :Dict = False
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = DecisionTransformerModelTester(self )
lowerCAmelCase__ :Optional[int] = ConfigTester(self , config_class=__a , hidden_size=3_7 )
def snake_case ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
@slow
def snake_case ( self ):
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ :List[Any] = DecisionTransformerModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ :str = model_class(__a )
lowerCAmelCase__ :List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ :List[str] = [*signature.parameters.keys()]
lowerCAmelCase__ :Tuple = [
'states',
'actions',
'rewards',
'returns_to_go',
'timesteps',
'attention_mask',
]
self.assertListEqual(arg_names[: len(__a )] , __a )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = 2 # number of steps of autoregressive prediction we will perform
lowerCAmelCase__ :Any = 1_0 # defined by the RL environment, may be normalized
lowerCAmelCase__ :List[str] = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' )
lowerCAmelCase__ :List[str] = model.to(__a )
lowerCAmelCase__ :List[Any] = model.config
torch.manual_seed(0 )
lowerCAmelCase__ :str = torch.randn(1 , 1 , config.state_dim ).to(device=__a , dtype=torch.floataa ) # env.reset()
lowerCAmelCase__ :Any = torch.tensor(
[[0.24_27_93, -0.28_69_30_74, 0.8_74_26_13], [0.67_81_52_74, -0.08_10_10_85, -0.12_95_21_47]] , device=__a )
lowerCAmelCase__ :str = torch.tensor(__a , device=__a , dtype=torch.floataa ).reshape(1 , 1 , 1 )
lowerCAmelCase__ :List[Any] = state
lowerCAmelCase__ :Optional[Any] = torch.zeros(1 , 0 , config.act_dim , device=__a , dtype=torch.floataa )
lowerCAmelCase__ :List[str] = torch.zeros(1 , 0 , device=__a , dtype=torch.floataa )
lowerCAmelCase__ :Union[str, Any] = torch.tensor(0 , device=__a , dtype=torch.long ).reshape(1 , 1 )
for step in range(__a ):
lowerCAmelCase__ :Tuple = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=__a )] , dim=1 )
lowerCAmelCase__ :Union[str, Any] = torch.cat([rewards, torch.zeros(1 , 1 , device=__a )] , dim=1 )
lowerCAmelCase__ :str = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :str = model(
states=__a , actions=__a , rewards=__a , returns_to_go=__a , timesteps=__a , attention_mask=__a , return_dict=__a , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=__a , dtype=torch.floataa ),
1.0,
False,
{},
)
lowerCAmelCase__ :Tuple = action_pred[0, -1]
lowerCAmelCase__ :Dict = torch.cat([states, state] , dim=1 )
lowerCAmelCase__ :Optional[int] = returns_to_go[0, -1] - reward
lowerCAmelCase__ :List[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
lowerCAmelCase__ :Tuple = torch.cat(
[timesteps, torch.ones((1, 1) , device=__a , dtype=torch.long ) * (step + 1)] , dim=1 )
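For context on the quantities fed to the model above: the returns-to-go sequence is the tail sum of future rewards, R_t = r_t + r_{t+1} + ... + r_T, which the loop maintains online by subtracting each observed reward. Computed offline it is simply:

# Returns-to-go as a reversed cumulative sum of rewards.
def returns_to_go(rewards):
    out, running = [], 0.0
    for r in reversed(rewards):
        running += r
        out.append(running)
    return out[::-1]

assert returns_to_go([1.0, 2.0, 3.0]) == [6.0, 5.0, 3.0]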
| 293 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : int = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='gpt_bigcode'
__a =['past_key_values']
__a ={
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Optional[Any] , __a : Tuple=5_02_57 , __a : str=10_24 , __a : Dict=7_68 , __a : Tuple=12 , __a : str=12 , __a : Optional[int]=None , __a : Dict="gelu_pytorch_tanh" , __a : Tuple=0.1 , __a : Tuple=0.1 , __a : Union[str, Any]=0.1 , __a : Tuple=1e-5 , __a : str=0.02 , __a : Dict=True , __a : Union[str, Any]=True , __a : Optional[int]=5_02_56 , __a : Optional[int]=5_02_56 , __a : Union[str, Any]=True , __a : Dict=True , __a : Union[str, Any]=True , **__a : List[Any] , ):
_a = vocab_size
_a = n_positions
_a = n_embd
_a = n_layer
_a = n_head
_a = n_inner
_a = activation_function
_a = resid_pdrop
_a = embd_pdrop
_a = attn_pdrop
_a = layer_norm_epsilon
_a = initializer_range
_a = scale_attn_weights
_a = use_cache
_a = attention_softmax_in_fpaa
_a = scale_attention_softmax_in_fpaa
_a = multi_query
_a = bos_token_id
_a = eos_token_id
super().__init__(bos_token_id=__a , eos_token_id=__a , **__a )
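The attribute_map above lets canonical names such as hidden_size alias model-specific ones such as n_embd. A toy sketch of the aliasing idea (illustrative only; the real PretrainedConfig machinery is more involved):

# Canonical attribute names aliased onto model-specific ones via __getattr__.
class AliasedConfig:
    attribute_map = {'hidden_size': 'n_embd', 'num_hidden_layers': 'n_layer'}

    def __init__(self, n_embd=768, n_layer=12):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):
        # Only called when normal lookup fails, so no recursion on real attrs.
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

cfg = AliasedConfig(n_embd=1024)
assert cfg.hidden_size == 1024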
| 63 | 0 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def UpperCAmelCase_ ( _A , _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(_A )
SCREAMING_SNAKE_CASE__ = FlaxAutoModelForSeqaSeqLM.from_config(config=_A )
SCREAMING_SNAKE_CASE__ = checkpoints.load_tax_checkpoint(_A )
SCREAMING_SNAKE_CASE__ = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
if config.model_type == "t5":
SCREAMING_SNAKE_CASE__ = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
SCREAMING_SNAKE_CASE__ = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE__ = '''TransientGlobalSelfAttention'''
else:
        raise ValueError(
            '''Given config is expected to have \`model_type=\'t5\'\`, or \`model_type=\'longt5\'\` with \`encoder_attention_type\`'''
            ''' attribute with a value from [\'local\', \'transient-global\'].''' )
# Encoder
for layer_index in range(config.num_layers ):
SCREAMING_SNAKE_CASE__ = F'''layers_{str(_A )}'''
# Self-Attention
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
SCREAMING_SNAKE_CASE__ = flax_model.params['''encoder''']['''block'''][str(_A )]['''layer''']
SCREAMING_SNAKE_CASE__ = tax_attention_key
SCREAMING_SNAKE_CASE__ = tax_attention_out
SCREAMING_SNAKE_CASE__ = tax_attention_query
SCREAMING_SNAKE_CASE__ = tax_attention_value
SCREAMING_SNAKE_CASE__ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE__ = tax_global_layer_norm
if split_mlp_wi:
SCREAMING_SNAKE_CASE__ = tax_mlp_wi_a
SCREAMING_SNAKE_CASE__ = tax_mlp_wi_a
else:
SCREAMING_SNAKE_CASE__ = tax_mlp_wi
SCREAMING_SNAKE_CASE__ = tax_mlp_wo
SCREAMING_SNAKE_CASE__ = tax_mlp_layer_norm
SCREAMING_SNAKE_CASE__ = flax_model_encoder_layer_block
# Only for layer 0:
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
SCREAMING_SNAKE_CASE__ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
SCREAMING_SNAKE_CASE__ = tax_encoder_global_rel_embedding
# Assigning
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
SCREAMING_SNAKE_CASE__ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
SCREAMING_SNAKE_CASE__ = F'''layers_{str(_A )}'''
# Self-Attention
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
SCREAMING_SNAKE_CASE__ = tax_enc_dec_attention_module['''key''']['''kernel''']
SCREAMING_SNAKE_CASE__ = tax_enc_dec_attention_module['''out''']['''kernel''']
SCREAMING_SNAKE_CASE__ = tax_enc_dec_attention_module['''query''']['''kernel''']
SCREAMING_SNAKE_CASE__ = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
SCREAMING_SNAKE_CASE__ = flax_model.params['''decoder''']['''block'''][str(_A )]['''layer''']
SCREAMING_SNAKE_CASE__ = tax_attention_key
SCREAMING_SNAKE_CASE__ = tax_attention_out
SCREAMING_SNAKE_CASE__ = tax_attention_query
SCREAMING_SNAKE_CASE__ = tax_attention_value
SCREAMING_SNAKE_CASE__ = tax_pre_attention_layer_norm
SCREAMING_SNAKE_CASE__ = tax_enc_dec_attention_key
SCREAMING_SNAKE_CASE__ = tax_enc_dec_attention_out
SCREAMING_SNAKE_CASE__ = tax_enc_dec_attention_query
SCREAMING_SNAKE_CASE__ = tax_enc_dec_attention_value
SCREAMING_SNAKE_CASE__ = tax_cross_layer_norm
if split_mlp_wi:
SCREAMING_SNAKE_CASE__ = tax_mlp_wi_a
SCREAMING_SNAKE_CASE__ = tax_mlp_wi_a
else:
SCREAMING_SNAKE_CASE__ = tax_mlp_wi
SCREAMING_SNAKE_CASE__ = tax_mlp_wo
SCREAMING_SNAKE_CASE__ = txa_mlp_layer_norm
SCREAMING_SNAKE_CASE__ = flax_model_decoder_layer_block
# Decoder Normalization
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
SCREAMING_SNAKE_CASE__ = txa_decoder_norm
# Only for layer 0:
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
SCREAMING_SNAKE_CASE__ = tax_decoder_rel_embedding
# Token Embeddings
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''token_embedder''']['''embedding''']
SCREAMING_SNAKE_CASE__ = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
SCREAMING_SNAKE_CASE__ = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(_A )
    print('''T5X Model was successfully converted!''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
_SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 314 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : int ) -> bool:
_a = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
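A caveat on the float check above: n ** (1 / 3) is inexact, so on typical platforms 64 ** (1 / 3) evaluates to 3.9999999999999996, and even perfect_cube(27) can print False. A rounding-based variant stays exact for integers of ordinary size (a hypothetical helper, not part of the snippet above):

# Round the float cube root, then verify exactly with integer arithmetic.
def perfect_cube_exact(n: int) -> bool:
    if n < 0:
        n = -n  # (-k)^3 = -(k^3), so the magnitude decides
    root = round(n ** (1 / 3))
    return root**3 == n  # for astronomically large n, an integer search would be safer

assert perfect_cube_exact(27) and perfect_cube_exact(64)
assert not perfect_cube_exact(4)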
| 63 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase: int = logging.get_logger(__name__)
_lowercase: str = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
_lowercase: List[Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
_lowercase: str = {'facebook/blenderbot_small-90M': 512}
def a( A : List[Any] ) -> List[str]:
"""simple docstring"""
a = set()
a = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
a = char
a = set(A )
return pairs
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ["input_ids", "attention_mask"]
def __init__(self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="__start__" , lowerCamelCase_="__end__" , lowerCamelCase_="__unk__" , lowerCamelCase_="__null__" , **lowerCamelCase_ , ):
"""simple docstring"""
super().__init__(unk_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , **__a )
with open(__a , encoding="utf-8" ) as vocab_handle:
a = json.load(__a )
a = {v: k for k, v in self.encoder.items()}
with open(__a , encoding="utf-8" ) as merges_handle:
a = merges_handle.read().split("\n" )[1:-1]
a = [tuple(merge.split() ) for merge in merges]
a = dict(zip(__a , range(len(__a ) ) ) )
a = {}
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
return len(self.encoder )
def UpperCamelCase_ (self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
a = re.sub("([.,!?()])" , R" \1" , __a )
a = re.sub("(')" , R" \1 " , __a )
a = re.sub(R"\s{2,}" , " " , __a )
if "\n" in token:
a = token.replace("\n" , " __newln__" )
a = token.split(" " )
a = []
for token in tokens:
if not len(__a ):
continue
a = token.lower()
a = tuple(__a )
a = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
a = get_pairs(__a )
if not pairs:
words.append(__a )
continue
while True:
a = min(__a , key=lambda lowerCamelCase_ : self.bpe_ranks.get(__a , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
a , a = bigram
a = []
a = 0
while i < len(__a ):
try:
a = word.index(__a , __a )
new_word.extend(word[i:j] )
a = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a = tuple(__a )
a = new_word
if len(__a ) == 1:
break
else:
a = get_pairs(__a )
a = "@@ ".join(__a )
a = word[:-4]
a = word
words.append(__a )
return " ".join(__a )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = []
a = re.findall(R"\S+\n?" , __a )
for token in words:
split_tokens.extend(list(self.bpe(__a ).split(" " ) ) )
return split_tokens
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = token.lower()
return self.encoder.get(__a , self.encoder.get(self.unk_token ) )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
return self.decoder.get(__a , self.unk_token )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = " ".join(__a ).replace("@@ " , "" ).strip()
return out_string
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None ):
"""simple docstring"""
if not os.path.isdir(__a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
a = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__a , ensure_ascii=__a ) + "\n" )
a = 0
with open(__a , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
a = token_index
writer.write(" ".join(__a ) + "\n" )
index += 1
return vocab_file, merge_file
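The bpe method above implements the standard greedy merge loop. A self-contained toy version, stripped of the caching and word-boundary details, shows the core idea (function name, ranks, and test word are made up):

# Repeatedly merge the best-ranked adjacent pair until none is ranked.
def bpe_toy(word, ranks):
    symbols = list(word)
    while len(symbols) > 1:
        pairs = {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}
        ranked = [p for p in pairs if p in ranks]
        if not ranked:
            break
        first, second = min(ranked, key=ranks.get)  # lowest rank = learned earliest
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

assert bpe_toy('lower', {('l', 'o'): 0, ('lo', 'w'): 1}) == ['low', 'e', 'r']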
| 227 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[int] = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='deta'
__a ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : List[str] , __a : List[str]=None , __a : Dict=9_00 , __a : str=20_48 , __a : Tuple=6 , __a : List[str]=20_48 , __a : str=8 , __a : Union[str, Any]=6 , __a : int=10_24 , __a : List[Any]=8 , __a : Dict=0.0 , __a : Tuple=True , __a : Optional[Any]="relu" , __a : Tuple=2_56 , __a : Optional[Any]=0.1 , __a : int=0.0 , __a : List[Any]=0.0 , __a : Optional[int]=0.02 , __a : str=1.0 , __a : Dict=True , __a : Dict=False , __a : Optional[int]="sine" , __a : Any=5 , __a : List[str]=4 , __a : Optional[int]=4 , __a : List[str]=True , __a : str=3_00 , __a : int=True , __a : int=True , __a : Tuple=1 , __a : Optional[int]=5 , __a : Tuple=2 , __a : Dict=1 , __a : Optional[int]=1 , __a : Any=5 , __a : Optional[int]=2 , __a : Dict=0.1 , __a : str=0.25 , **__a : Tuple , ):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_a = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(__a , __a ):
_a = backbone_config.pop("model_type" )
_a = CONFIG_MAPPING[backbone_model_type]
_a = config_class.from_dict(__a )
_a = backbone_config
_a = num_queries
_a = max_position_embeddings
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = init_xavier_std
_a = encoder_layerdrop
_a = auxiliary_loss
_a = position_embedding_type
# deformable attributes
_a = num_feature_levels
_a = encoder_n_points
_a = decoder_n_points
_a = two_stage
_a = two_stage_num_proposals
_a = with_box_refine
_a = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
_a = class_cost
_a = bbox_cost
_a = giou_cost
# Loss coefficients
_a = mask_loss_coefficient
_a = dice_loss_coefficient
_a = bbox_loss_coefficient
_a = giou_loss_coefficient
_a = eos_coefficient
_a = focal_alpha
super().__init__(is_encoder_decoder=__a , **__a )
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self : Dict ):
return self.d_model
def UpperCamelCase__ ( self : List[str] ):
_a = copy.deepcopy(self.__dict__ )
_a = self.backbone_config.to_dict()
_a = self.__class__.model_type
return output
| 63 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
__lowerCamelCase = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 162 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : int , lowercase : int=1024 , lowercase : int=1024 , lowercase : Tuple=False , **lowercase : Optional[int] ) -> Union[str, Any]:
_a = AutoTokenizer.from_pretrained(lowercase )
_a = SeqaSeqDataset(lowercase , lowercase , lowercase , lowercase , type_path="train" , **lowercase )
_a = tok.pad_token_id
def get_lens(lowercase : Optional[int] ):
_a = tqdm(
DataLoader(lowercase , batch_size=512 , num_workers=8 , shuffle=lowercase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_a = []
for batch in dl:
_a = batch["input_ids"].ne(lowercase ).sum(1 ).tolist()
_a = batch["labels"].ne(lowercase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowercase , lowercase ):
max_lens.append(max(lowercase , lowercase ) )
else:
max_lens.extend(lowercase )
return max_lens
_a = get_lens(lowercase )
_a = SeqaSeqDataset(lowercase , lowercase , lowercase , lowercase , type_path="val" , **lowercase )
_a = get_lens(lowercase )
pickle_save(lowercase , train_ds.len_file )
pickle_save(lowercase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
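The length computation above, batch['input_ids'].ne(pad_token_id).sum(1), just counts non-pad tokens per padded row; the plain-Python equivalent (with hypothetical values):

# Count non-pad tokens in each padded row.
PAD = 0
batch_input_ids = [[5, 9, 7, PAD, PAD], [3, PAD, PAD, PAD, PAD]]
lengths = [sum(tok != PAD for tok in row) for row in batch_input_ids]
assert lengths == [3, 1]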
| 63 | 0 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
_A = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
_A = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print('''\n'''.join(upper_files) + '''\n''')
_A = [file for file in filepaths if ' ' in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print('''\n'''.join(space_files) + '''\n''')
_A = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print('''\n'''.join(hyphen_files) + '''\n''')
_A = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print('''\n'''.join(nodir_files) + '''\n''')
_A = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 122 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : str ):
_a = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : List[str] ):
_a = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : List[str] ):
_a = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : List[str] ):
_a = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : str ):
_a = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Any ):
_a = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Any ):
# pass variant but use the non-variant filenames
_a = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a = "fp16"
self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Dict ):
_a = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : List[str] ):
# pass variant but use the non-variant filenames
_a = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Optional[int] ):
_a = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_a = "fp16"
self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
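The rule these tests pin down: a repository is safetensors-compatible when every folder holding a PyTorch .bin weight also holds a .safetensors counterpart. A folder-level sketch of that predicate (simplified; the real diffusers helper also handles variants such as .fp16):

# Every folder with a .bin file must also contain a .safetensors file.
def is_safetensors_compatible_sketch(filenames):
    folders_with_bin = {f.rpartition('/')[0] for f in filenames if f.endswith('.bin')}
    folders_with_safe = {f.rpartition('/')[0] for f in filenames if f.endswith('.safetensors')}
    return folders_with_bin <= folders_with_safe

assert is_safetensors_compatible_sketch(
    ['unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors'])
assert not is_safetensors_compatible_sketch(['unet/diffusion_pytorch_model.bin'])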
| 63 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
snake_case : Union[str, Any] = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
def __init__( self :List[Any] ,*__snake_case :Optional[Any] ,**__snake_case :Tuple ) -> List[Any]:
warnings.warn(
'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use DeformableDetrImageProcessor instead.' ,__a ,)
super().__init__(*__a ,**__a )
| 240 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : bytes ) -> str:
return "".join([hex(lowercase )[2:].zfill(2 ).upper() for byte in list(lowercase )] )
def _lowerCamelCase ( lowercase : str ) -> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(lowercase ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(lowercase ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(lowercase ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
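A quick stdlib round trip for reference (note that the encoder and decoder above were both left with the same name, so the second definition shadows the first at module level):

# Base16 round trip using the stdlib hex helpers.
data = b'BASE16'
encoded = data.hex().upper()
assert encoded == '424153453136'
assert bytes.fromhex(encoded) == data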
| 63 | 0 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
_snake_case = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
for attribute in key.split(""".""" ):
_a : List[str] = getattr(UpperCamelCase__ , UpperCamelCase__ )
if weight_type is not None:
_a : str = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
else:
_a : Dict = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_a : Any = value
elif weight_type == "weight_g":
_a : Tuple = value
elif weight_type == "weight_v":
_a : Optional[int] = value
elif weight_type == "bias":
_a : Optional[int] = value
else:
_a : str = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Union[str, Any] = []
_a : Union[str, Any] = fairseq_model.state_dict()
_a : int = hf_model.feature_extractor
for name, value in fairseq_dict.items():
_a : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == """group""" , )
_a : List[str] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_a : List[Any] = True
if "*" in mapped_key:
_a : Dict = name.split(UpperCamelCase__ )[0].split(""".""" )[-2]
_a : str = mapped_key.replace("""*""" , UpperCamelCase__ )
if "weight_g" in name:
_a : List[Any] = """weight_g"""
elif "weight_v" in name:
_a : Optional[int] = """weight_v"""
elif "bias" in name and "relative_attention_bias" not in name:
_a : Optional[Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_a : str = """weight"""
else:
_a : Dict = None
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Tuple = full_name.split("""conv_layers.""" )[-1]
_a : str = name.split(""".""" )
_a : str = int(items[0] )
_a : int = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_a : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_a : List[str] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_a : Optional[int] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_a : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ):
'''simple docstring'''
# load the pre-trained checkpoints
_a : Optional[int] = torch.load(UpperCamelCase__ )
_a : Union[str, Any] = WavLMConfigOrig(checkpoint["""cfg"""] )
_a : List[Any] = WavLMOrig(UpperCamelCase__ )
model.load_state_dict(checkpoint["""model"""] )
model.eval()
if config_path is not None:
_a : Any = WavLMConfig.from_pretrained(UpperCamelCase__ )
else:
_a : Tuple = WavLMConfig()
_a : Tuple = WavLMModel(UpperCamelCase__ )
recursively_load_weights(UpperCamelCase__ , UpperCamelCase__ )
hf_wavlm.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
_snake_case = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
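
# Example invocation (the paths below are placeholders, not real files, and the
# script filename is the conventional one assumed for this converter):
#
#     python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path /path/to/WavLM-Base.pt \
#         --pytorch_dump_folder_path ./wavlm-base \
#         --config_path ./config.json  # optional; defaults to WavLMConfig()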
| 294 |
"""Tests for gradient synchronization and accumulation in `accelerate`."""

from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=False):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
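
# The user-facing pattern these tests exercise, as a minimal hedged sketch:
# under gradient accumulation, `accelerator.accumulate(model)` suppresses gradient
# synchronization on the accumulation steps and only syncs on the step that
# actually updates (`compute_loss` below is a hypothetical helper):
#
#     for batch in dataloader:
#         with accelerator.accumulate(model):
#             loss = compute_loss(model, batch)
#             accelerator.backward(loss)
#             optimizer.step()
#             optimizer.zero_grad()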
| 63 | 0 |
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance


AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Calculate the shortest distance in metres between two points on an
    ellipsoidal Earth, using Lambert's formula."""
    # Equation parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
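
# Usage sketch (coordinates are illustrative; the exact metre value depends on the
# haversine helper, so only the order of magnitude is checked here):
#
#     # San Francisco (37.774856, -122.424227) to Yosemite (37.864742, -119.537521)
#     d = lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521)
#     assert 2.0e5 < d < 3.0e5  # roughly 254 km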
| 68 |
"""TrOCR model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
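
# Usage sketch: a default config, with attribute_map in action
# (hidden_size transparently reads d_model):
#
#     config = TrOCRConfig()
#     assert config.hidden_size == config.d_model == 1024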
| 63 | 0 |
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Sum of the proper divisors of ``n`` (divisors excluding ``n`` itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Return the sum of all amicable numbers below ``n``."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
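
# Quick sanity check: 220 and 284 form the classic amicable pair, so
# sum_of_divisors maps each onto the other.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220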
| 157 |
"""Script to sort the auto mappings of the transformers `auto` module alphabetically."""

import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
#    SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname: str, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to"
            " fix this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
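
# Typical invocations from the repository root (assuming the script lives at
# utils/sort_auto_mappings.py, as in the transformers repo):
#
#     python utils/sort_auto_mappings.py                # rewrite files in place
#     python utils/sort_auto_mappings.py --check_only   # fail if anything is unsorted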
| 63 | 0 |
"""BERT model configuration."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
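
# Usage sketch for the two classes above (a hedged illustration, not library docs):
#
#     config = BertConfig()             # bert-base-uncased-sized defaults
#     onnx_config = BertOnnxConfig(config)
#     list(onnx_config.inputs)          # ["input_ids", "attention_mask", "token_type_ids"]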
| 298 |
"""BigBird model configuration."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
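
# Usage sketch: BigBird's block-sparse attention can be switched to full
# attention at config time ("original_full"), which the library also does
# internally for short sequences:
#
#     config = BigBirdConfig(attention_type="original_full")
#     assert config.attention_type == "original_full"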
| 63 | 0 |
import inspect
import unittest

from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
    from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 9 |
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """Combines CLIP image and text embeddings into the format expected by the
    unCLIP decoder."""

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim: int,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
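
# Shape sketch for the forward pass above (a hedged illustration using the default
# dims; the concrete time_embed_dim/cross_attention_dim values are assumptions):
#
#     proj = UnCLIPTextProjModel(time_embed_dim=1536, cross_attention_dim=2048)
#     out_hidden, out_time = proj(
#         image_embeddings=torch.randn(2, 768),
#         prompt_embeds=torch.randn(2, 768),
#         text_encoder_hidden_states=torch.randn(2, 77, 768),
#         do_classifier_free_guidance=False,
#     )
#     # out_hidden.shape == (2, 77 + 4, 2048); out_time.shape == (2, 1536)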
| 63 | 0 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE = 100 ) ->int:
"""simple docstring"""
lowerCAmelCase__ :Any = 0
lowerCAmelCase__ :Optional[Any] = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
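
# Closed-form cross-check for the loop above: the square of the sum is
# (n(n+1)/2)^2 and the sum of squares is n(n+1)(2n+1)/6.
def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


assert solution(100) == solution_closed_form(100) == 25164150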
| 293 |
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
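
# Wiring sketch (the `args` attribute names below are hypothetical; this just
# mirrors how these helpers are typically handed to a Lightning Trainer):
#
#     trainer = pl.Trainer(
#         callbacks=[
#             Seq2SeqLoggingCallback(),
#             get_checkpoint_callback(args.output_dir, args.val_metric),
#             get_early_stopping_callback(args.val_metric, args.early_stopping_patience),
#         ],
#     )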
| 63 | 0 |
import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(self.input_array.shape[1], 4)

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    # Input values (the three-bit truth table).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
| 314 |
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # The cluster whose weight vector is closest to the sample wins.
        return 0 if d0 < d1 else 1

    def update(self, weights: list[list[float]], sample: list[int], j: int, alpha: float) -> list[list[float]]:
        """Update the winning vector."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
| 63 | 0 |
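One hand-checked step of the competitive update above: with alpha = 0.5, every component of the winning weight vector moves halfway toward the sample.

weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
sample = [1, 1, 0, 0]
alpha = 0.5
j = 1  # suppose cluster 1 won
updated = [w + alpha * (s - w) for s, w in zip(sample, weights[j])]
print(updated)  # [0.9, 0.7, 0.35, 0.15]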
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A pure-Python vector supporting the basic linear-algebra operations."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """A pure-Python matrix supporting the basic linear-algebra operations."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!")
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
| 227 |
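A short usage sketch of the Vector and Matrix classes above (names as fixed here):

v = Vector([1, 2, 3])
w = Vector([4, 5, 6])
print(v + w)                 # (5,7,9)
print(v * w)                 # 32  (dot product)
print(v.euclidean_length())  # 3.7416573867739413
m = Matrix([[1, 2], [3, 4]], 2, 2)
print(m.determinant())       # -2
print(m * Vector([1, 1]))    # (3,7)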
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """Wraps an OwlViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning, )
        return self.image_processor
| 63 | 0 |
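A minimal usage sketch of the processor above; the checkpoint is the public OWL-ViT base model, and the printed shapes are illustrative.

from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.new("RGB", (768, 768))
inputs = processor(
    text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
print(inputs["input_ids"].shape)  # one row per text query, e.g. torch.Size([2, 16])
print(inputs["pixel_values"].shape)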
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 162 |
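The import-structure pattern above defers heavy imports until first attribute access. A stripped-down sketch of the same idea (not the transformers implementation):

import importlib
from types import ModuleType

class LazyModule(ModuleType):
    """Resolve "ClassName" -> submodule lazily on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map exported name -> submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        if attr in self._class_to_module:
            module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
            return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")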
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 63 | 0 |
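A quick check of the generator above, plus the matching partial sum:

print(harmonic_series("4"))  # ['1', '1/2', '1/3', '1/4']
print(sum(1 / k for k in range(1, 5)))  # H_4 = 2.0833...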
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 122 |
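A short sketch of how the attribute map above behaves once the config is instantiated:

config = ConditionalDetrConfig(num_queries=100, d_model=128)
print(config.hidden_size)              # 128 -- alias for d_model via attribute_map
print(config.num_attention_heads)      # 8   -- alias for encoder_attention_heads
print(config.to_dict()["model_type"])  # conditional_detr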
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!", )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.", )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.", )
    parser.add_argument(
        "--no_tpu", action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.", )
    parser.add_argument(
        "--tpu_name", type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local", )
    parser.add_argument(
        "--tpu_zone", type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.", )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16", action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.", )
    parser.add_argument(
        "--train_dataset", type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18,
        help="Size of the shuffle buffer (in samples)", )
    parser.add_argument(
        "--eval_dataset", type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.", )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.", )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.", )
    parser.add_argument(
        "--max_length", type=int, default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py", )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.", )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project)
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local.")
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")
    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf")

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask, )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size,
        shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )
    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size,
        shuffle=False, )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))
    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )
    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
| 63 | 0 |
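A small self-contained sketch of the special-token masking trick used in mask_with_collator above; the token IDs here are made up.

import tensorflow as tf

cls_id, sep_id = 0, 2
input_ids = tf.constant([[0, 5, 6, 2, 1, 1]])
attention_mask = tf.constant([[1, 1, 1, 1, 0, 0]])
# padding positions, CLS and SEP must never be selected for masking
special_tokens_mask = (
    ~tf.cast(attention_mask, tf.bool)
    | (input_ids == cls_id)
    | (input_ids == sep_id)
)
print(special_tokens_mask.numpy())  # [[ True False False  True  True  True]]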
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 240 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    """Combines a LayoutLMv3 image processor (optionally running OCR) with a LayoutLMv3 tokenizer."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images
        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning, )
        return self.image_processor
| 63 | 0 |
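A minimal usage sketch with apply_ocr disabled, so words and (0-1000 normalized) boxes are supplied by hand; the checkpoint is the public LayoutLMv3 base model.

from PIL import Image
from transformers import LayoutLMv3ImageProcessor, LayoutLMv3Processor, LayoutLMv3TokenizerFast

image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
tokenizer = LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base")
processor = LayoutLMv3Processor(image_processor, tokenizer)
image = Image.new("RGB", (224, 224), "white")
words = ["hello", "world"]
boxes = [[10, 10, 60, 30], [70, 10, 130, 30]]
encoding = processor(image, words, boxes=boxes, return_tensors="pt")
print(sorted(encoding.keys()))  # ['attention_mask', 'bbox', 'input_ids', 'pixel_values']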
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ""
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        date_report = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
        for failed_file in all_filesafailed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                test_class = ""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = ""
                payload = {
                    "type": "section",
                    "text": {
                        "type": "mrkdwn",
                        "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                    },
                }
                client.chat_postMessage(
                    channel="#accelerate-ci-daily",
                    thread_ts=ts,
                    blocks=[payload],
                )
| 294 |
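A small sketch of what the custom hf_table_format above produces: a Markdown-style pipe table with no rule lines, which renders cleanly inside Slack code blocks.

from tabulate import DataRow, TableFormat, tabulate

fmt = TableFormat(
    lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None,
    headerrow=DataRow("", "|", "|"), datarow=DataRow("", "|", "|"),
    padding=1, with_header_hide=None,
)
print(tabulate([["tests/test_x.py", 2]], headers=["Test Location", "Num Failed"], tablefmt=fmt))
# prints roughly:
# | Test Location   |   Num Failed |
# | tests/test_x.py |            2 |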
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """Configuration wrapper for MMBT: copies an existing text config and adds the modality settings."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 63 | 0 |
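A quick sketch of the wrapper above: it shares the wrapped config's attribute dictionary and layers modality settings on top (BertConfig is chosen here only for illustration).

from transformers import BertConfig

config = MMBTConfig(BertConfig(), num_labels=2, modal_hidden_size=2048)
print(config.hidden_size)        # 768, inherited from the wrapped BertConfig
print(config.modal_hidden_size)  # 2048
print(config.num_labels)         # 2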
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
| 68 |
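Quick checks of the helpers above using a scratch environment variable (the variable names are arbitrary):

import os

os.environ["MY_SCRATCH_FLAG"] = "yes"
print(parse_flag_from_env("MY_SCRATCH_FLAG"))                   # True
print(parse_flag_from_env("MY_MISSING_FLAG"))                   # False
print(get_int_from_env(["MISSING_A", "MISSING_B"], default=8))  # 8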
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 63 | 0 |
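The loop above agrees with the closed forms sum = n(n+1)/2 and sum of squares = n(n+1)(2n+1)/6, which give a constant-time version:

def solution_closed_form(n: int = 100) -> int:
    return (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6

print(solution_closed_form(10))   # 2640
print(solution_closed_form(100))  # 25164150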
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}


def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.")
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                }, step=overall_step, )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps", type=str, default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
    parser.add_argument(
        "--output_dir", type=str, default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None,
        help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--with_tracking", action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
    parser.add_argument(
        "--project_dir", type=str, default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information", )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
| 157 |
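Quick checks of the filename-to-label helper above (paths as on POSIX systems):

print(extract_label("images/great_pyrenees_123.jpg"))  # great_pyrenees
print(extract_label("beagle_7.jpg"))                   # beagle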
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 0 |
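A few spot checks of the digit-reversal test above:

print(is_palindrome(121))   # True
print(is_palindrome(-121))  # False -- the sign breaks the symmetry
print(is_palindrome(1002))  # False
print(is_palindrome(0))     # True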
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)
        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)
        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 .")
        self.register_modules(
            segmentation_model=segmentation_model, segmentation_processor=segmentation_processor,
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler,
            safety_checker=safety_checker, feature_extractor=feature_extractor, )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device("cuda")
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Segment the region described by `text` with CLIPSeg, then inpaint it.
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt").to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer,
            unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor, )
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width,
            num_inference_steps=num_inference_steps, guidance_scale=guidance_scale,
            negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt,
            eta=eta, generator=generator, latents=latents, output_type=output_type,
            return_dict=return_dict, callback=callback, callback_steps=callback_steps, )
| 298 |
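A hedged usage sketch of the community pipeline above; the CLIPSeg and inpainting checkpoints are the ones commonly paired with it, but treat the exact model IDs and the input file name as assumptions.

import torch
from diffusers import DiffusionPipeline
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

segmentation_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
segmentation_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",
    segmentation_model=segmentation_model,
    segmentation_processor=segmentation_processor,
).to("cuda")
image = Image.open("couch.png").convert("RGB").resize((512, 512))
result = pipe(prompt="a striped sofa", image=image, text="the couch").images[0]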
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 0 |