| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86–54.5k chars) | int64 (0–371) | string (87–49.2k chars) | int64 (0–349) | int64 (0–1) |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
_CITATION = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn",
    ):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average,
            sample_weight=sample_weight, zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
            scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size to `None` to disable attention slicing
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            # NOTE: as written, user-supplied latents must be accompanied by a matching
            # `latents_reference` tensor following the same convention as the generated one.
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
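A usage sketch for the pipeline (the checkpoint id is a placeholder and the class name comes from the deobfuscation above). The point of this pipeline is that the same seed should produce a similar composition at different resolutions, because the initial noise is spliced from a fixed 64x64 reference grid.

import torch

pipe = SeedResizeStableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
generator = torch.Generator("cuda").manual_seed(0)
image_512 = pipe("a photo of an astronaut", height=512, width=512, generator=generator).images[0]
generator = torch.Generator("cuda").manual_seed(0)
image_768 = pipe("a photo of an astronaut", height=768, width=768, generator=generator).images[0]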
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
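A short sketch of `_rope_scaling_validation` in action (assumes the class above is importable as `LlamaConfig`):

config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # valid: known type, float factor > 1.0

try:
    LlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})
except ValueError as err:
    print(err)  # the factor field must be a float > 1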
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = "\\n\n"
_DESCRIPTION = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to 'cuda' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        78.22\n        >>> print(round(results[\"perplexities\"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = datasets.load_dataset(\"wikitext\",\n        ...                                     \"wikitext-2-raw-v1\",\n        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!='']\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        60.35\n        >>> print(round(results[\"perplexities\"][0], 2))\n        81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
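An illustrative invocation of the script above, shown as a comment (the script file name and every path are placeholders). The flag combination respects `sanity_checks`: with `--mlm`, `--alpha_mlm` must be positive and `--alpha_clm` must be zero.

# python train.py \
#     --student_type distilbert --student_config student_config.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0 \
#     --token_counts token_counts.pickle --data_file binarized_data.pickle \
#     --dump_path serialization_dir/my_first_distillation --force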
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[Any] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
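A sketch of what the `_LazyModule` above buys (assumes `transformers` and a backend such as PyTorch are installed): importing the package is cheap, and the heavy modeling module is only imported on first attribute access.

from transformers.models import roberta  # cheap: only `_import_structure` is built

model_cls = roberta.RobertaModel  # first attribute access triggers the real modeling import
print(model_cls.__name__)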
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
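A quick usage sketch for the configuration above (assumes `transformers` is installed):

config = VivitConfig(num_frames=16)
print(config.num_frames, config.hidden_size)  # 16 768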
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
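A sketch of running this module the way the upstream CI does, shown as a comment (the test path is the usual location in the transformers repository, assumed here; the slow integration tests are opt-in):

# RUN_SLOW=1 python -m pytest tests/models/mobilevit/test_modeling_mobilevit.py -v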
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using the iterative Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
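A quick check of `heaps` above: three elements yield 3! = 6 permutations, in the order Heap's algorithm produces them.

print(heaps([1, 2, 3]))
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]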
import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: int = super().prepare_init_args_and_inputs_for_common()
__snake_case: int = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetUpsampleBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[int] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[Any] = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A , include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Optional[Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : int ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Optional[Any] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : str ):
__snake_case: Union[str, Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Any = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : int ):
__snake_case: Any = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(A )
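
# Context for the hard-coded slices above: each test delegates to the shared
# `test_output` helper inherited from UNetBlockTesterMixin. Below is a minimal
# sketch of that style of check; the helper name, slicing, and tolerance are
# assumptions for illustration, not the mixin's actual API.
def _check_block_output_slice(block, inputs_dict, expected_slice):
    import torch

    block.eval()
    with torch.no_grad():
        output = block(**inputs_dict)
    if isinstance(output, tuple):
        output = output[0]  # e.g. down blocks also return residual hidden states
    # compare a small fixed corner of the output tensor to the expected values
    output_slice = output[0, -1, -3:, -3:].flatten().cpu()
    assert torch.allclose(output_slice, torch.tensor(expected_slice), atol=5e-3)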
| 293
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = KandinskyImgaImgPipeline
lowerCAmelCase__ = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
lowerCAmelCase__ = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
lowerCAmelCase__ = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
lowerCAmelCase__ = False
@property
def UpperCAmelCase__ ( self : Dict ):
return 32
@property
def UpperCAmelCase__ ( self : Any ):
return 32
@property
def UpperCAmelCase__ ( self : str ):
return self.time_input_dim
@property
def UpperCAmelCase__ ( self : Any ):
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self : List[Any] ):
return 100
@property
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def UpperCAmelCase__ ( self : Tuple ):
torch.manual_seed(0 )
__snake_case: str = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
__snake_case: Any = MultilingualCLIP(A )
__snake_case: List[Any] = text_encoder.eval()
return text_encoder
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
__snake_case: str = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__snake_case: Union[str, Any] = UNetaDConditionModel(**A )
return model
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
torch.manual_seed(0 )
__snake_case: Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Any = self.dummy_text_encoder
__snake_case: Optional[Any] = self.dummy_tokenizer
__snake_case: Dict = self.dummy_unet
__snake_case: Optional[int] = self.dummy_movq
__snake_case: Dict = {
"""num_train_timesteps""": 1_000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__snake_case: List[str] = DDIMScheduler(**A )
__snake_case: Union[str, Any] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCAmelCase__ ( self : Optional[int] , A : Optional[Any] , A : Tuple=0 ):
__snake_case: List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A ) ).to(A )
__snake_case: Tuple = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A )
# create init_image
__snake_case: int = floats_tensor((1, 3, 64, 64) , rng=random.Random(A ) ).to(A )
__snake_case: Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case: Dict = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ).resize((256, 256) )
if str(A ).startswith("""mps""" ):
__snake_case: List[Any] = torch.manual_seed(A )
else:
__snake_case: Union[str, Any] = torch.Generator(device=A ).manual_seed(A )
__snake_case: Any = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Optional[Any] = """cpu"""
__snake_case: int = self.get_dummy_components()
__snake_case: List[str] = self.pipeline_class(**A )
__snake_case: List[Any] = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
__snake_case: int = pipe(**self.get_dummy_inputs(A ) )
__snake_case: Optional[int] = output.images
__snake_case: Union[str, Any] = pipe(
**self.get_dummy_inputs(A ) , return_dict=A , )[0]
__snake_case: Tuple = image[0, -3:, -3:, -1]
__snake_case: int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case: int = np.array(
[0.6147_4943, 0.607_3539, 0.4330_8544, 0.592_8269, 0.4749_3595, 0.4675_5973, 0.461_3838, 0.4536_8797, 0.5011_9233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
__snake_case: List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__snake_case: int = """A red cartoon frog, 4k"""
__snake_case: Any = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(A )
__snake_case: List[Any] = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
__snake_case: List[str] = pipeline.to(A )
pipeline.set_progress_bar_config(disable=A )
__snake_case: Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
__snake_case: Tuple = pipe_prior(
A , generator=A , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__snake_case: int = pipeline(
A , image=A , image_embeds=A , negative_image_embeds=A , generator=A , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
__snake_case: List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A , A )
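
# Condensed from the slow test above (sketch only, same public checkpoints):
# Kandinsky 2.1 img2img is a two-stage flow in which the prior pipeline turns
# the prompt into CLIP image embeddings that then condition the img2img pipeline.
#
#   prior = KandinskyPriorPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
#   ).to("cuda")
#   pipe = KandinskyImgaImgPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
#   ).to("cuda")
#   image_emb, negative_image_emb = prior("A red cartoon frog, 4k").to_tuple()
#   frog = pipe(
#       "A red cartoon frog, 4k", image=init_image, image_embeds=image_emb,
#       negative_image_embeds=negative_image_emb, strength=0.2, output_type="np",
#   ).images[0]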
| 369
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase__ ( self : Dict ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = ort.SessionOptions()
__snake_case: List[Any] = False
return options
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__snake_case: Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__snake_case: List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
__snake_case: int = """A red cat sitting on a park bench"""
__snake_case: Any = np.random.RandomState(0 )
__snake_case: Optional[Any] = pipe(
prompt=A , image=A , mask_image=A , guidance_scale=7.5 , num_inference_steps=10 , generator=A , output_type="""np""" , )
__snake_case: List[Any] = output.images
__snake_case: str = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__snake_case: Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__snake_case: Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__snake_case: Optional[int] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
__snake_case: List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=A , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
__snake_case: Optional[int] = """A red cat sitting on a park bench"""
__snake_case: Dict = np.random.RandomState(0 )
__snake_case: Optional[Any] = pipe(
prompt=A , image=A , mask_image=A , guidance_scale=7.5 , num_inference_steps=20 , generator=A , output_type="""np""" , )
__snake_case: List[str] = output.images
__snake_case: str = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__snake_case: Union[str, Any] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
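
# Condensed usage of the pipeline exercised above (sketch only; the second test
# differs from the first solely in the LMSDiscreteScheduler swapped in before
# inference):
#
#   pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting", revision="onnx",
#       provider="CUDAExecutionProvider",
#   )
#   result = pipe(prompt="A red cat sitting on a park bench", image=init_image,
#                 mask_image=mask_image, num_inference_steps=10, output_type="np")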
| 293
| 0
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
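
# Added verification helper (illustrative, not in the original): a matrix times
# its computed inverse should recover the identity, up to floating-point error.
def _demo_inverse_of_matrix() -> None:
    import numpy as np

    m = [[2.0, 5.0], [1.0, 3.0]]  # determinant 1; exact inverse [[3, -5], [-1, 2]]
    assert np.allclose(np.matmul(m, inverse_of_matrix(m)), np.eye(2))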
| 370
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    # range(counter) is evaluated once, so the outer loop runs once per qubit
    # even though counter is decremented inside the loop body
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
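
# Expected behaviour (added note): applied to the all-zeros register, the QFT
# produces a uniform superposition over all 2**n basis states, so with 3 qubits
# and 10000 shots each of the 8 outcomes should appear roughly 1250 times.
def _demo_quantum_fourier_transform() -> None:
    counts = quantum_fourier_transform(3)
    assert len(counts) == 8  # every 3-bit string observed
    assert all(1000 < shots < 1500 for shots in counts.values())  # near-uniform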
| 293
| 0
|
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
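
# Added concrete cases (illustrative): a string can be rearranged into a
# palindrome iff at most one character occurs an odd number of times.
def _demo_palindrome_rearrangement() -> None:
    cases = [("Momo", True), ("Mother", False), ("aabb", True), ("abc", False)]
    for text, expected in cases:
        assert can_string_be_rearranged_as_palindrome(text) is expected
        assert can_string_be_rearranged_as_palindrome_counter(text) is expected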
| 371
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
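
# Behaviour of the guard above (added note): when torch or transformers is
# unavailable, the dummy classes are imported instead, so the import itself
# always succeeds and the error surfaces only when a pipeline is actually used:
#
#   from diffusers import UniDiffuserPipeline      # succeeds either way
#   UniDiffuserPipeline.from_pretrained("...")     # raises if backends missing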
| 293
| 0
|
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pixastruct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18)
        decoder_config = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa)

    model = PixaStructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tok = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tok)

    if use_large:
        processor.image_processor.max_patches = 4096

    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
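
# Hypothetical invocation of the script above (paths are placeholders):
#   python convert_pix2struct_original_pytorch_to_hf.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-converted \
#       --use_large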
| 350
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : str , *A : Dict , A : Optional[int]=None , A : Tuple=None , **A : Optional[int] ):
super().__init__(*A , **A )
__snake_case: List[Any] = eval_examples
__snake_case: str = post_process_function
def UpperCAmelCase__ ( self : List[Any] , A : Dict=None , A : int=None , A : List[Any]=None , A : str = "eval" ):
__snake_case: int = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case: Any = self.get_eval_dataloader(A )
__snake_case: Optional[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case: Union[str, Any] = self.compute_metrics
__snake_case: List[str] = None
__snake_case: Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__snake_case: Tuple = time.time()
try:
__snake_case: Any = eval_loop(
A , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , )
finally:
__snake_case: Optional[int] = compute_metrics
__snake_case: Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__snake_case: List[str] = self.post_process_function(A , A , output.predictions )
__snake_case: List[Any] = self.compute_metrics(A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
__snake_case: str = metrics.pop(A )
metrics.update(output.metrics )
else:
__snake_case: List[Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(A )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__snake_case: str = self.callback_handler.on_evaluate(self.args , self.state , self.control , A )
return metrics
def UpperCAmelCase__ ( self : Optional[Any] , A : List[Any] , A : List[str] , A : str=None , A : str = "test" ):
__snake_case: Optional[Any] = self.get_test_dataloader(A )
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case: Optional[int] = self.compute_metrics
__snake_case: List[Any] = None
__snake_case: str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__snake_case: Dict = time.time()
try:
__snake_case: str = eval_loop(
A , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , )
finally:
__snake_case: List[Any] = compute_metrics
__snake_case: Dict = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case: Union[str, Any] = self.post_process_function(A , A , output.predictions , """predict""" )
__snake_case: str = self.compute_metrics(A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
__snake_case: List[str] = metrics.pop(A )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A )
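
# Wiring sketch for the subclass above (names are illustrative, in the style of
# the question-answering example scripts): it is used like a plain Trainer plus
# the two QA-specific hooks stored in __init__.
#
#   trainer = QuestionAnsweringTrainer(
#       model=model, args=training_args,
#       train_dataset=train_dataset, eval_dataset=eval_dataset,
#       eval_examples=eval_examples,                      # raw, pre-tokenization
#       post_process_function=post_processing_function,   # logits -> answer text
#       compute_metrics=compute_metrics,                  # e.g. SQuAD EM/F1
#   )
#   metrics = trainer.evaluate()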
| 293
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase : Optional[Any] = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[str] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[int] = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
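
# Effect of the _LazyModule indirection (added note): importing the package only
# registers the names declared in _import_structure; the heavy torch-backed
# module is imported on first attribute access.
#
#   from transformers.models.bloom import BloomConfig   # cheap, config only
#   from transformers.models.bloom import BloomModel    # now pulls in torch code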
| 351
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : int = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = """rwkv"""
lowerCAmelCase__ = {"""max_position_embeddings""": """context_length"""}
def __init__( self : Dict , A : List[Any]=50_277 , A : List[Any]=1_024 , A : Union[str, Any]=4_096 , A : Tuple=32 , A : List[Any]=None , A : Tuple=None , A : Tuple=1E-5 , A : int=0 , A : Optional[int]=0 , A : Dict=6 , A : Dict=False , A : int=True , **A : List[Any] , ):
__snake_case: Tuple = vocab_size
__snake_case: Any = context_length
__snake_case: Dict = hidden_size
__snake_case: Dict = num_hidden_layers
__snake_case: Union[str, Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
__snake_case: str = intermediate_size if intermediate_size is not None else 4 * hidden_size
__snake_case: Any = layer_norm_epsilon
__snake_case: int = rescale_every
__snake_case: str = use_cache
__snake_case: Dict = bos_token_id
__snake_case: Union[str, Any] = eos_token_id
super().__init__(
tie_word_embeddings=A , bos_token_id=A , eos_token_id=A , **A )
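
# Derived defaults (added check; `RwkvConfig` is this class's name in the
# transformers library): leaving attention_hidden_size or intermediate_size as
# None makes __init__ fill them in from hidden_size.
#
#   config = RwkvConfig(hidden_size=1024)
#   assert config.attention_hidden_size == 1024     # defaults to hidden_size
#   assert config.intermediate_size == 4 * 1024     # defaults to 4 * hidden_size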
| 293
| 0
|
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model")
__UpperCAmelCase : Tuple = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
__UpperCAmelCase : str = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CamembertTokenizer
lowerCAmelCase__ = CamembertTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def UpperCAmelCase__ ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case: Union[str, Any] = CamembertTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = """<pad>"""
__snake_case: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(A ) , 1_004 )
def UpperCAmelCase__ ( self : List[str] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_005 )
def UpperCAmelCase__ ( self : str ):
__snake_case: int = CamembertTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
__snake_case: Dict = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
__snake_case: Union[str, Any] = """I was born in 92000, and this is falsé."""
__snake_case: List[Any] = tokenizer.encode(A )
__snake_case: Tuple = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
__snake_case: Dict = tokenizer.encode(A , add_special_tokens=A )
__snake_case: Union[str, Any] = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
__snake_case: Tuple = tokenizer.convert_ids_to_tokens(A )
__snake_case: Any = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
def UpperCAmelCase__ ( self : List[str] ):
if not self.test_rust_tokenizer:
return
__snake_case: Any = self.get_tokenizer()
__snake_case: Tuple = self.get_rust_tokenizer()
__snake_case: List[str] = """I was born in 92000, and this is falsé."""
__snake_case: Optional[int] = tokenizer.tokenize(A )
__snake_case: str = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
__snake_case: str = tokenizer.encode(A , add_special_tokens=A )
__snake_case: Dict = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
__snake_case: Tuple = self.get_rust_tokenizer()
__snake_case: str = tokenizer.encode(A )
__snake_case: Any = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
@slow
def UpperCAmelCase__ ( self : List[str] ):
# fmt: off
__snake_case: Tuple = {"""input_ids""": [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
__snake_case: Optional[Any] = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=A , )
| 352
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase : str = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__UpperCAmelCase : Any = 250_004
__UpperCAmelCase : List[str] = 250_020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = MBartaaTokenizer
lowerCAmelCase__ = MBartaaTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def UpperCAmelCase__ ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case: Optional[int] = MBartaaTokenizer(A , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Any = """<s>"""
__snake_case: Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(A ) , 1_054 )
def UpperCAmelCase__ ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_054 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Dict = MBartaaTokenizer(A , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=A )
__snake_case: int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__snake_case: Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
__snake_case: List[Any] = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__snake_case: int = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
# fmt: off
__snake_case: List[str] = {"""input_ids""": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def UpperCAmelCase__ ( self : Union[str, Any] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__snake_case: Any = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case: Optional[int] = self.rust_tokenizer_class.from_pretrained(A , **A )
__snake_case: Union[str, Any] = self.tokenizer_class.from_pretrained(A , **A )
__snake_case: List[str] = tempfile.mkdtemp()
__snake_case: Tuple = tokenizer_r.save_pretrained(A )
__snake_case: Optional[int] = tokenizer_p.save_pretrained(A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__snake_case: Dict = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
__snake_case: Tuple = tokenizer_r.from_pretrained(A )
__snake_case: Optional[Any] = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
__snake_case: Tuple = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(A , legacy_format=A )
__snake_case: List[str] = tokenizer_p.save_pretrained(A )
# Checks it save with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
__snake_case: List[Any] = tokenizer_r.from_pretrained(A )
__snake_case: Dict = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
__snake_case: List[str] = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(A , legacy_format=A )
__snake_case: Dict = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__snake_case: Any = tokenizer_r.from_pretrained(A )
__snake_case: Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = """facebook/mbart-large-50-one-to-many-mmt"""
lowerCAmelCase__ = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
lowerCAmelCase__ = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
lowerCAmelCase__ = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def UpperCAmelCase__ ( cls : int ):
__snake_case: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
__snake_case: str = 1
return cls
def UpperCAmelCase__ ( self : Any ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250_020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 250_038 )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
self.assertIn(A , self.tokenizer.all_special_ids )
__snake_case: Dict = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
__snake_case: str = self.tokenizer.decode(A , skip_special_tokens=A )
__snake_case: Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A )
self.assertEqual(A , A )
self.assertNotIn(self.tokenizer.eos_token , A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: List[str] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , A )
__snake_case: Union[str, Any] = 10
__snake_case: List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0]
self.assertEqual(ids[0] , A )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(A ) , A )
def UpperCAmelCase__ ( self : Tuple ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250_053, 250_001] )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[Any] = tempfile.mkdtemp()
__snake_case: Any = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A )
__snake_case: Union[str, Any] = MBartaaTokenizer.from_pretrained(A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A )
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" )
__snake_case: List[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: int = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
__snake_case: Optional[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(A , A )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__snake_case: List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCAmelCase__ ( self : str ):
__snake_case: List[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" )
__snake_case: Union[str, Any] = self.tokenizer(
text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" )
__snake_case: Dict = targets["""input_ids"""]
__snake_case: Any = shift_tokens_right(A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(A ) , {
# en_XX, A, test, EOS
"""input_ids""": [[250_004, 62, 3_034, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250_001,
} , )
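# A minimal NumPy sketch of the behaviour the assertions above rely on:
# `shift_tokens_right` rotates the final non-pad token (here EOS) to position
# 0, so labels [RO_CODE, ..., EOS] become decoder inputs [EOS, RO_CODE, ...].
# Illustrative only -- not the transformers implementation.
import numpy as np

def shift_tokens_right_sketch(labels, pad_token_id):
    shifted = labels.copy()
    for row in range(labels.shape[0]):
        last = int((labels[row] != pad_token_id).sum()) - 1  # index of the EOS token
        shifted[row, 1 : last + 1] = labels[row, :last]
        shifted[row, 0] = labels[row, last]
    return shifted

# e.g. [[250020, 884, 2]] -> [[2, 250020, 884]], matching
# batch.decoder_input_ids[1][:2] == [2, RO_CODE] above.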
| 293
| 0
|
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    '''Resize an image with nearest-neighbour interpolation.'''
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("""Destination width/height should be > 0""")
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        # start from a white canvas of the destination size
        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]
    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)
    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()
    imshow(
        f'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
    )
    waitKey(0)
    destroyAllWindows()
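# For large images the per-pixel Python loop in `process` is slow. The same
# nearest-neighbour mapping can be expressed with NumPy fancy indexing; a
# small sketch under the same ratio convention (illustrative helper, not part
# of the original script):
def nearest_neighbour_vectorized(img, dst_w, dst_h):
    src_h, src_w = img.shape[0], img.shape[1]
    # one source index per destination row/column, then a single gather
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)
    return img[ys[:, None], xs[None, :]]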
| 353
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__UpperCAmelCase : str = logging.get_logger(__name__)
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : Any , A : int , A : int , A : float , **A : Optional[int] ):
__snake_case: List[str] = feature_size
__snake_case: Optional[int] = sampling_rate
__snake_case: Any = padding_value
__snake_case: Dict = kwargs.pop("""padding_side""" , """right""" )
__snake_case: Union[str, Any] = kwargs.pop("""return_attention_mask""" , A )
super().__init__(**A )
def UpperCAmelCase__ ( self : Optional[Any] , A : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , A : Union[bool, str, PaddingStrategy] = True , A : Optional[int] = None , A : bool = False , A : Optional[int] = None , A : Optional[bool] = None , A : Optional[Union[str, TensorType]] = None , ):
# If we have a list of dicts, convert it to a dict of lists.
# We do this so the method can be used as a collate_fn in a PyTorch DataLoader
if isinstance(A , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__snake_case: Optional[int] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f''' to this method that includes {self.model_input_names[0]}, but you provided'''
f''' {list(processed_features.keys() )}''' )
__snake_case: List[str] = processed_features[self.model_input_names[0]]
__snake_case: Any = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(A ) == 0:
if return_attention_mask:
__snake_case: Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__snake_case: int = required_input[0]
if isinstance(A , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
__snake_case: Optional[int] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(A ):
__snake_case: Optional[int] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(A ):
__snake_case: str = """tf"""
elif is_torch_tensor(A ):
__snake_case: str = """pt"""
elif isinstance(A , (int, float, list, tuple, np.ndarray) ):
__snake_case: List[str] = """np"""
else:
raise ValueError(
f'''type of {first_element} unknown: {type(A )}. '''
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__snake_case: List[Any] = to_numpy(A )
else:
__snake_case: Union[str, Any] = [to_numpy(A ) for v in value]
# Convert padding_strategy in PaddingStrategy
__snake_case: Union[str, Any] = self._get_padding_strategies(padding=A , max_length=A )
__snake_case: Any = processed_features[self.model_input_names[0]]
__snake_case: int = len(A )
if not all(len(A ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
__snake_case: Union[str, Any] = []
for i in range(A ):
__snake_case: List[Any] = {k: v[i] for k, v in processed_features.items()}
# truncation
__snake_case: Tuple = self._truncate(
A , max_length=A , pad_to_multiple_of=A , truncation=A , )
truncated_inputs.append(A )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__snake_case: Optional[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__snake_case: List[str] = PaddingStrategy.MAX_LENGTH
__snake_case: List[Any] = {}
for i in range(A ):
# padding
__snake_case: Any = self._pad(
truncated_inputs[i] , max_length=A , padding_strategy=A , pad_to_multiple_of=A , return_attention_mask=A , )
for key, value in outputs.items():
if key not in batch_outputs:
__snake_case: Optional[Any] = []
if value.dtype is np.dtype(np.float64 ):
__snake_case: str = value.astype(np.float32 )
batch_outputs[key].append(A )
return BatchFeature(A , tensor_type=A )
def UpperCAmelCase__ ( self : int , A : Union[Dict[str, np.ndarray], BatchFeature] , A : Optional[int] = None , A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , A : Optional[int] = None , A : Optional[bool] = None , ):
__snake_case: List[Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__snake_case: List[str] = len(A )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case: List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case: Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__snake_case: List[str] = np.ones(len(A ) , dtype=np.int32 )
if needs_to_be_padded:
__snake_case: Any = max_length - len(A )
if self.padding_side == "right":
if return_attention_mask:
__snake_case: Optional[int] = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
__snake_case: Any = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__snake_case: Union[str, Any] = np.pad(
A , A , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__snake_case: Dict = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
__snake_case: Union[str, Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__snake_case: str = np.pad(
A , A , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def UpperCAmelCase__ ( self : Optional[Any] , A : Union[Dict[str, np.ndarray], BatchFeature] , A : Optional[int] = None , A : Optional[int] = None , A : Optional[bool] = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
__snake_case: List[str] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case: List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case: Tuple = len(A ) > max_length
if needs_to_be_truncated:
__snake_case: List[Any] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__snake_case: int = processed_features["""attention_mask"""][:max_length]
return processed_features
def UpperCAmelCase__ ( self : int , A : int=False , A : int=None ):
# Get padding strategy
if padding is not False:
if padding is True:
__snake_case: Optional[int] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(A , A ):
__snake_case: Optional[int] = PaddingStrategy(A )
elif isinstance(A , A ):
__snake_case: Any = padding
else:
__snake_case: Any = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
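# A standalone sketch of the padding convention `_pad` implements above:
# 1-D features are padded on the configured side with `padding_value`, and the
# attention mask marks real frames with 1 and padded frames with 0.
# Illustrative only; assumes the simple `feature_size == 1` case.
import numpy as np

def pad_1d_sketch(values, max_length, padding_value, side="right"):
    difference = max_length - len(values)
    attention_mask = np.ones(len(values), dtype=np.int32)
    if side == "right":
        attention_mask = np.pad(attention_mask, (0, difference))
        values = np.pad(values, (0, difference), "constant", constant_values=padding_value)
    else:
        attention_mask = np.pad(attention_mask, (difference, 0))
        values = np.pad(values, (difference, 0), "constant", constant_values=padding_value)
    return values, attention_mask

# pad_1d_sketch(np.array([0.1, 0.2, 0.3]), 5, 0.0)
#   -> (array([0.1, 0.2, 0.3, 0. , 0. ]), array([1, 1, 1, 0, 0], dtype=int32))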
| 293
| 0
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None) -> Any:
if attention_mask is None:
__snake_case: Tuple = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id) , tf.int8)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __snake_case :
'''simple docstring'''
lowerCAmelCase__ = OPTConfig
lowerCAmelCase__ = {}
lowerCAmelCase__ = """gelu"""
def __init__( self : Tuple , A : Any , A : Optional[Any]=13 , A : List[str]=7 , A : List[str]=True , A : Optional[Any]=False , A : Dict=99 , A : List[Any]=16 , A : Union[str, Any]=2 , A : str=4 , A : Tuple=4 , A : Union[str, Any]="gelu" , A : Tuple=0.1 , A : List[Any]=0.1 , A : str=20 , A : List[str]=2 , A : Any=1 , A : Tuple=0 , A : List[str]=16 , A : Tuple=16 , ):
__snake_case: Tuple = parent
__snake_case: int = batch_size
__snake_case: Optional[int] = seq_length
__snake_case: Optional[int] = is_training
__snake_case: Union[str, Any] = use_labels
__snake_case: List[str] = vocab_size
__snake_case: Any = hidden_size
__snake_case: int = num_hidden_layers
__snake_case: str = num_attention_heads
__snake_case: List[Any] = intermediate_size
__snake_case: Union[str, Any] = hidden_act
__snake_case: Tuple = hidden_dropout_prob
__snake_case: Dict = attention_probs_dropout_prob
__snake_case: str = max_position_embeddings
__snake_case: Optional[int] = eos_token_id
__snake_case: List[Any] = pad_token_id
__snake_case: Tuple = bos_token_id
__snake_case: Tuple = embed_dim
__snake_case: Union[str, Any] = word_embed_proj_dim
__snake_case: List[str] = False
def UpperCAmelCase__ ( self : Any ):
__snake_case: Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__snake_case: Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__snake_case: Optional[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
__snake_case: Dict = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=A , **self.config_updates , )
__snake_case: Dict = prepare_opt_inputs_dict(A , A )
return config, inputs_dict
def UpperCAmelCase__ ( self : Tuple , A : Optional[Any] , A : Optional[Any] ):
__snake_case: Dict = TFOPTModel(config=A )
__snake_case: List[str] = inputs_dict["""input_ids"""]
__snake_case: Dict = input_ids[:1, :]
__snake_case: int = inputs_dict["""attention_mask"""][:1, :]
__snake_case: List[Any] = 1
# first forward pass
__snake_case: Tuple = model(A , attention_mask=A , use_cache=A )
__snake_case: Optional[int] = outputs.to_tuple()
# create hypothetical next tokens and extend next_input_ids accordingly
__snake_case: Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case: Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and attention_mask
__snake_case: Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
__snake_case: List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__snake_case: Dict = model(A , attention_mask=A )[0]
__snake_case: Tuple = model(A , attention_mask=A , past_key_values=A )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__snake_case: Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__snake_case: Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
__snake_case: Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A , A , rtol=1E-3 )
@require_tf
class __snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
lowerCAmelCase__ = (TFOPTForCausalLM,) if is_tf_available() else ()
lowerCAmelCase__ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = 10
def UpperCAmelCase__ ( self : str ):
__snake_case: Dict = TFOPTModelTester(self )
__snake_case: Dict = ConfigTester(self , config_class=A )
def UpperCAmelCase__ ( self : Tuple ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(A : Tuple , A : Any ):
if hasattr(A , """weight""" ):
return embedding_layer.weight
else:
# Build the word embedding weights if they do not exist yet,
# then retry fetching the attribute once built.
model.build()
if hasattr(A , """weight""" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
__snake_case: Optional[Any] = model_class(config=A )
__snake_case: List[str] = _get_word_embedding_weight(A , model.get_input_embeddings() )
__snake_case: Optional[Any] = _get_word_embedding_weight(A , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(A )
__snake_case: Optional[int] = _get_word_embedding_weight(A , model.get_input_embeddings() )
__snake_case: Dict = _get_word_embedding_weight(A , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__snake_case: str = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , A )
# check that weights remain the same after resizing
__snake_case: str = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__snake_case: int = False
self.assertTrue(A )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , A )
__snake_case: List[str] = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__snake_case: List[Any] = False
self.assertTrue(A )
def A__ ( SCREAMING_SNAKE_CASE__) -> Optional[int]:
return tf.constant(SCREAMING_SNAKE_CASE__ , dtype=tf.int32)
@require_tf
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = 99
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[str] = tf.ones((4, 1) , dtype=tf.int32 ) * 2
__snake_case: Tuple = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__snake_case: List[str] = input_ids.shape[0]
__snake_case: Optional[int] = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Dict = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
__snake_case: int = _long_tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
__snake_case: Tuple = tf.not_equal(A , model.config.pad_token_id )
with tf.GradientTape():
__snake_case: int = model(input_ids=A , attention_mask=A ).last_hidden_state
__snake_case: Optional[int] = (1, 11, 512)
self.assertEqual(output.shape , A )
__snake_case: int = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , A , atol=4E-3 ) )
__snake_case: Dict = tf.function(A , jit_compile=A )
__snake_case: Optional[int] = xla_generate(A , A )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , A , atol=4E-2 ) )
@require_tf
@slow
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Optional[Any] ):
super().setUp()
__snake_case: int = """facebook/opt-350m"""
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: int = TFOPTForCausalLM.from_pretrained(self.path_model )
__snake_case: Union[str, Any] = GPTaTokenizer.from_pretrained(self.path_model )
__snake_case: Dict = [
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__snake_case: List[str] = tokenizer(A , return_tensors="""tf""" , padding=A , add_special_tokens=A )
__snake_case: Union[str, Any] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__snake_case: Union[str, Any] = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(A , A , atol=1E-4 ) )
__snake_case: List[Any] = tf.function(A , jit_compile=A )
__snake_case: int = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(A , A , atol=1E-4 ) )
@require_tf
@slow
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase__ ( self : int ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: str = """facebook/opt-125m"""
__snake_case: int = [
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
__snake_case: Any = []
__snake_case: List[Any] = GPTaTokenizer.from_pretrained(A )
__snake_case: Optional[Any] = TFOPTForCausalLM.from_pretrained(A )
for prompt in self.prompts:
__snake_case: Optional[int] = tokenizer(A , return_tensors="""tf""" ).input_ids
__snake_case: Tuple = model.generate(A , max_length=10 )
__snake_case: Optional[int] = tokenizer.batch_decode(A , skip_special_tokens=A )
predicted_outputs += generated_string
self.assertListEqual(A , A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Tuple = """facebook/opt-350m"""
__snake_case: int = GPTaTokenizer.from_pretrained(A )
__snake_case: Tuple = TFOPTForCausalLM.from_pretrained(A )
__snake_case: Tuple = """left"""
# use different length sentences to test batching
__snake_case: Union[str, Any] = [
"""Hello, my dog is a little""",
"""Today, I""",
]
__snake_case: Tuple = tokenizer(A , return_tensors="""tf""" , padding=A )
__snake_case: Dict = inputs["""input_ids"""]
__snake_case: Optional[int] = model.generate(input_ids=A , attention_mask=inputs["""attention_mask"""] )
__snake_case: Optional[Any] = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
__snake_case: str = model.generate(input_ids=A )
__snake_case: Union[str, Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["""attention_mask"""][-1] , tf.intaa ) )
__snake_case: List[Any] = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
__snake_case: List[Any] = model.generate(input_ids=A , max_length=model.config.max_length - num_paddings )
__snake_case: Union[str, Any] = tokenizer.batch_decode(A , skip_special_tokens=A )
__snake_case: Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A )
__snake_case: Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=A )
__snake_case: List[str] = [
"""Hello, my dog is a little bit of a dork.\nI'm a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(A , A )
self.assertListEqual(A , [non_padded_sentence, padded_sentence] )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: List[str] = """facebook/opt-350m"""
__snake_case: str = [
"""Today is a beautiful day and I want to""",
"""In the city of San Francisco, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
__snake_case: str = []
__snake_case: Dict = GPTaTokenizer.from_pretrained(A )
__snake_case: List[str] = TFOPTForCausalLM.from_pretrained(A )
for prompt in self.prompts:
__snake_case: Tuple = tokenizer(A , return_tensors="""tf""" ).input_ids
__snake_case: Optional[Any] = model.generate(A , max_length=10 )
__snake_case: Any = tokenizer.batch_decode(A , skip_special_tokens=A )
predicted_outputs += generated_string
self.assertListEqual(A , A )
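# The batched-generation test above sets `padding_side = "left"` before
# tokenizing. For a decoder-only model like OPT this matters: generation
# continues from the last position of the prompt, so shorter prompts must be
# padded on the left. A tiny layout sketch (pad id 1, illustrative values):
def left_pad(seqs, pad_id):
    width = max(len(s) for s in seqs)
    return [[pad_id] * (width - len(s)) + s for s in seqs]

# left_pad([[7, 8, 9], [4, 5]], pad_id=1) -> [[7, 8, 9], [1, 4, 5]]
# (right padding would yield [4, 5, 1], leaving the model to continue
# generating from a pad token for the shorter prompt)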
| 354
|
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    # Element-wise rectified linear unit: max(0, x).
    return np.maximum(0, vector)
if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0 0 5]
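# The subgradient used in backprop pairs naturally with the forward pass
# above; a small companion sketch (we pick 0 at x == 0 by convention):
def relu_derivative(vector):
    return (np.asarray(vector) > 0).astype(float)

# relu_derivative([-1, 0, 5]) --> [0. 0. 1.]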
| 293
| 0
|
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial() -> None:
    # Visualize the polynomial fit against the raw data points.
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Polynomial Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()
if __name__ == "__main__":
    viz_polynomial()
    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
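    # What PolynomialFeatures(degree=4) actually feeds the linear model: each
    # row [x] expands to [1, x, x**2, x**3, x**4], so the "polynomial
    # regression" above is ordinary least squares on those columns:
    print(poly_reg.fit_transform([[2.0]]))  # [[ 1.  2.  4.  8. 16.]]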
| 355
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@parameterized.expand([(None,), ("""foo.json""",)] )
def UpperCAmelCase__ ( self : List[str] , A : Optional[Any] ):
__snake_case: Any = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
__snake_case: Optional[int] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: str = AutoConfig.from_pretrained("""gpt2""" )
__snake_case: Any = GenerationConfig.from_model_config(A )
__snake_case: str = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self : str ):
__snake_case: List[str] = GenerationConfig()
__snake_case: Tuple = {
"""max_new_tokens""": 1_024,
"""foo""": """bar""",
}
__snake_case: List[str] = copy.deepcopy(A )
__snake_case: Optional[int] = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {"""foo""": """bar"""} )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: List[str] = GenerationConfig()
__snake_case: Optional[int] = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(A )
__snake_case: Any = GenerationConfig.from_pretrained(A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
__snake_case: int = GenerationConfig.from_model_config(A )
assert not hasattr(A , """foo""" ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Dict = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
__snake_case: Union[str, Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
__snake_case: Tuple = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCAmelCase__ ( cls : List[str] ):
__snake_case: Optional[int] = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Optional[int] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
__snake_case: str = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id="""test-generation-config""" , push_to_hub=A , use_auth_token=self._token )
__snake_case: Optional[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
__snake_case: int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=A , use_auth_token=self._token )
__snake_case: Optional[int] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
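# Minimal usage sketch of the save/load round trip the tests above exercise;
# only public GenerationConfig behaviour is assumed, and "foo.json" is an
# arbitrary file name:
def _roundtrip_sketch():
    with tempfile.TemporaryDirectory() as tmp_dir:
        config = GenerationConfig(do_sample=True, temperature=0.7)
        config.save_pretrained(tmp_dir, config_name="foo.json")
        loaded = GenerationConfig.from_pretrained(tmp_dir, config_name="foo.json")
        assert loaded.temperature == 0.7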
| 293
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self : Dict , A : UNetaDModel , A : KarrasVeScheduler ):
super().__init__()
self.register_modules(unet=A , scheduler=A )
@torch.no_grad()
def __call__( self : Union[str, Any] , A : int = 1 , A : int = 50 , A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A : Optional[str] = "pil" , A : bool = True , **A : Tuple , ):
__snake_case: List[Any] = self.unet.config.sample_size
__snake_case: Dict = (batch_size, 3, img_size, img_size)
__snake_case: List[Any] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
__snake_case: List[str] = randn_tensor(A , generator=A , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(A )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
__snake_case: Dict = self.scheduler.schedule[t]
__snake_case: str = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
__snake_case: Any = self.scheduler.add_noise_to_input(A , A , generator=A )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
__snake_case: int = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
__snake_case: Any = self.scheduler.step(A , A , A , A )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
__snake_case: List[str] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
__snake_case: str = self.scheduler.step_correct(
A , A , A , A , step_output.prev_sample , step_output["""derivative"""] , )
__snake_case: Any = step_output.prev_sample
__snake_case: Union[str, Any] = (sample / 2 + 0.5).clamp(0 , 1 )
__snake_case: str = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case: List[Any] = self.numpy_to_pil(A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A )
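# The loop above follows the stochastic sampler of Karras et al.: an Euler
# step from sigma_hat to sigma_prev plus a second-order (Heun) correction.
# A compact sketch of steps 4-6, with `denoise` standing in for the model
# call (illustrative only, not the scheduler's exact code):
def heun_step(sample_hat, sigma_hat, sigma_prev, denoise):
    # 4./5. evaluate dx/dsigma at sigma_hat and take an Euler step
    derivative = (sample_hat - denoise(sample_hat, sigma_hat)) / sigma_hat
    sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
    if sigma_prev != 0:
        # 6. correct with the average of the slopes at both endpoints
        derivative_corr = (sample_prev - denoise(sample_prev, sigma_prev)) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * 0.5 * (derivative + derivative_corr)
    return sample_prev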
| 356
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__UpperCAmelCase : Tuple = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def A__ ( SCREAMING_SNAKE_CASE__) -> Union[str, Any]:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts)
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config)
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights)
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> str:
if args.student_type == "roberta":
__snake_case: Optional[Any] = False
elif args.student_type == "gpt2":
__snake_case: str = False
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[str]:
if args.student_type == "roberta":
__snake_case: Optional[int] = False
def A__ ( ) -> Tuple:
__snake_case: Optional[int] = argparse.ArgumentParser(description="""Training""")
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""")
parser.add_argument(
"""--dump_path""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The output directory (log, checkpoints, parameters, etc.)""")
parser.add_argument(
"""--data_file""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=SCREAMING_SNAKE_CASE__ , choices=["""distilbert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""Path to the student configuration.""")
parser.add_argument(
"""--student_pretrained_weights""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Load student initialization checkpoint.""")
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""Teacher type (BERT, RoBERTa).""")
parser.add_argument("""--teacher_name""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The teacher model.""")
parser.add_argument("""--temperature""" , default=2.0 , type=SCREAMING_SNAKE_CASE__ , help="""Temperature for the softmax temperature.""")
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the distillation loss. Must be >=0.""")
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the CLM loss. Must be >=0.""")
parser.add_argument("""--alpha_mse""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the MSE loss. Must be >=0.""")
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the cosine embedding loss. Must be >=0.""")
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""")
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to mask out.""")
parser.add_argument("""--word_keep""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to keep.""")
parser.add_argument("""--word_rand""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to randomly replace.""")
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=SCREAMING_SNAKE_CASE__ , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=SCREAMING_SNAKE_CASE__ , help="""The token counts in the data_file for MLM.""")
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=SCREAMING_SNAKE_CASE__ , default=3 , help="""Number of pass on the whole dataset.""")
parser.add_argument("""--batch_size""" , type=SCREAMING_SNAKE_CASE__ , default=5 , help="""Batch size (for each process).""")
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=SCREAMING_SNAKE_CASE__ , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=SCREAMING_SNAKE_CASE__ , help="""Linear warmup proportion.""")
parser.add_argument("""--weight_decay""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Weight decay if we apply some.""")
parser.add_argument("""--learning_rate""" , default=5e-4 , type=SCREAMING_SNAKE_CASE__ , help="""The initial learning rate for Adam.""")
parser.add_argument("""--adam_epsilon""" , default=1e-6 , type=SCREAMING_SNAKE_CASE__ , help="""Epsilon for Adam optimizer.""")
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=SCREAMING_SNAKE_CASE__ , help="""Max gradient norm.""")
parser.add_argument("""--initializer_range""" , default=0.02 , type=SCREAMING_SNAKE_CASE__ , help="""Random initialization range.""")
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=SCREAMING_SNAKE_CASE__ , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=SCREAMING_SNAKE_CASE__ , default=1 , help="""Number of GPUs in the node.""")
parser.add_argument("""--local_rank""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , help="""Distributed training - Local rank""")
parser.add_argument("""--seed""" , type=SCREAMING_SNAKE_CASE__ , default=56 , help="""Random seed""")
parser.add_argument("""--log_interval""" , type=SCREAMING_SNAKE_CASE__ , default=500 , help="""Tensorboard logging interval.""")
parser.add_argument("""--checkpoint_interval""" , type=SCREAMING_SNAKE_CASE__ , default=4000 , help="""Checkpoint interval.""")
__snake_case: List[Any] = parser.parse_args()
sanity_checks(SCREAMING_SNAKE_CASE__)
# ARGS #
init_gpu_params(SCREAMING_SNAKE_CASE__)
set_seed(SCREAMING_SNAKE_CASE__)
if args.is_master:
if os.path.exists(args.dump_path):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
""" it. Use `--force` if you want to overwrite it.""")
else:
shutil.rmtree(args.dump_path)
if not os.path.exists(args.dump_path):
os.makedirs(args.dump_path)
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''')
# SAVE PARAMS #
logger.info(F'''Param: {args}''')
with open(os.path.join(args.dump_path , """parameters.json""") , """w""") as f:
json.dump(vars(SCREAMING_SNAKE_CASE__) , SCREAMING_SNAKE_CASE__ , indent=4)
git_log(args.dump_path)
__snake_case , __snake_case , __snake_case: str = MODEL_CLASSES[args.student_type]
__snake_case , __snake_case , __snake_case: Union[str, Any] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__snake_case: Tuple = teacher_tokenizer_class.from_pretrained(args.teacher_name)
__snake_case: str = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__snake_case: List[str] = tokenizer.all_special_tokens.index(SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''')
__snake_case: Optional[Any] = special_tok_ids
__snake_case: List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file , """rb""") as fp:
__snake_case: int = pickle.load(SCREAMING_SNAKE_CASE__)
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''')
with open(args.token_counts , """rb""") as fp:
__snake_case: List[str] = pickle.load(SCREAMING_SNAKE_CASE__)
__snake_case: Dict = np.maximum(SCREAMING_SNAKE_CASE__ , 1) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__snake_case: Union[str, Any] = 0.0 # do not predict special tokens
__snake_case: Any = torch.from_numpy(SCREAMING_SNAKE_CASE__)
else:
__snake_case: Any = None
__snake_case: Union[str, Any] = LmSeqsDataset(params=SCREAMING_SNAKE_CASE__ , data=SCREAMING_SNAKE_CASE__)
logger.info("""Data loader created.""")
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''')
__snake_case: Tuple = student_config_class.from_pretrained(args.student_config)
__snake_case: List[str] = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''')
__snake_case: Optional[int] = student_model_class.from_pretrained(args.student_pretrained_weights , config=SCREAMING_SNAKE_CASE__)
else:
__snake_case: Union[str, Any] = student_model_class(SCREAMING_SNAKE_CASE__)
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''')
logger.info("""Student loaded.""")
# TEACHER #
__snake_case: Optional[int] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=SCREAMING_SNAKE_CASE__)
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''')
logger.info(F'''Teacher loaded from {args.teacher_name}.''')
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if args.freeze_token_type_embds:
freeze_token_type_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__snake_case: List[str] = Distiller(
params=SCREAMING_SNAKE_CASE__ , dataset=SCREAMING_SNAKE_CASE__ , token_probs=SCREAMING_SNAKE_CASE__ , student=SCREAMING_SNAKE_CASE__ , teacher=SCREAMING_SNAKE_CASE__)
distiller.train()
logger.info("""Let's go get some drinks.""")
if __name__ == "__main__":
main()
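# The `token_probs` built in `main` down-weight frequent tokens via
# counts ** -mlm_smoothing (0.7 by default), similar to word2vec subsampling,
# so rare tokens are masked more often. A tiny sketch of the effect with
# made-up counts:
counts = np.array([1_000_000, 10_000, 100, 1])
weights = np.maximum(counts, 1) ** -0.7
probs = weights / weights.sum()  # multinomial sampling weights for masking
# probs ~= [6e-05, 0.0015, 0.038, 0.96]: the rarest token is by far the most
# likely to be selected for masking.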
| 293
| 0
|
import math
def proth(number: int) -> int:
    """Return the `number`-th Proth number: 3, 5, 9, 13, 17, 25, ..."""
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # +1 for binary starting at 0 (2^0, 2^1, ...), +1 to start at the 3rd Proth number
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
    return proth_list[number - 1]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f'ValueError: there is no {number}th Proth number')
            continue
        print(f'The {number}th Proth number: {value}')
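# A brute-force cross-check of the table builder above. A number N is a Proth
# number when N = k * 2**n + 1 with k odd and k < 2**n (illustrative helper):
def is_proth(candidate: int) -> bool:
    n = 1
    while (1 << n) < candidate:
        k, rem = divmod(candidate - 1, 1 << n)
        if rem == 0 and k % 2 == 1 and k < (1 << n):
            return True
        n += 1
    return False

# [m for m in range(3, 30) if is_proth(m)] -> [3, 5, 9, 13, 17, 25],
# matching proth(1) .. proth(6)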
| 357
|
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 293
| 0
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Dict = 10
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = [1, 2, 3, 4]
__snake_case: str = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(A , self.block_size , 0 ) , A )
def UpperCAmelCase__ ( self : int ):
__snake_case: Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
__snake_case: Union[str, Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(A , self.block_size , 0 ) , A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
__snake_case: Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(A , self.block_size , 0 ) , A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
__snake_case: int = process_story(A )
self.assertEqual(A , [] )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[str] = """"""
__snake_case: Optional[Any] = process_story(A )
self.assertEqual(A , [] )
self.assertEqual(A , [] )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: int = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
__snake_case: int = process_story(A )
__snake_case: str = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(A , A )
__snake_case: List[Any] = ["""It was the best of times."""]
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[str] = torch.tensor([1, 2, 3, 4] )
__snake_case: Tuple = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(A , 0 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
__snake_case: List[str] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(A , 23 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : str ):
__snake_case: Dict = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__snake_case: Dict = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(A , 1 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : str ):
__snake_case: Any = 101
__snake_case: Tuple = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
__snake_case: List[Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__snake_case: Tuple = compute_token_type_ids(A , A )
np.testing.assert_array_equal(A , A )
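# All three `build_mask` expectations above are consistent with a simple
# "zero out the pad id" rule; a hedged sketch (the real utils_summarization
# implementation may differ in details):
def build_mask_sketch(sequence, pad_token_id):
    # 1 for real tokens, 0 wherever the token equals the pad id
    return (sequence != pad_token_id).long()

# build_mask_sketch(torch.tensor([8, 2, 3, 4, 1, 1, 1]), 1)
#   -> tensor([1, 1, 1, 1, 0, 0, 0])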
| 358
|
import argparse
from collections import defaultdict
import yaml
__UpperCAmelCase : int = "docs/source/en/_toctree.yml"
def A__ ( SCREAMING_SNAKE_CASE__) -> Dict:
__snake_case: Union[str, Any] = defaultdict(SCREAMING_SNAKE_CASE__)
for doc in model_doc:
counts[doc["local"]] += 1
__snake_case: Dict = [key for key, value in counts.items() if value > 1]
__snake_case: Optional[Any] = []
for duplicate_key in duplicates:
__snake_case: Tuple = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key})
if len(SCREAMING_SNAKE_CASE__) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of contents at '''
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""")
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]})
# Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1])
# Sort
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Clean each modality section, tracking whether anything changed
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]

    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__UpperCAmelCase : str = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 293
| 0
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    # Wraps a learning-rate scheduler so that it only steps when the wrapped optimizer(s)
    # actually performed a training step (e.g. not on overflow-skipped steps in mixed precision).
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
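# Minimal usage sketch (normally `Accelerator.prepare` builds this wrapper and also wraps
# the optimizer so it exposes `step_was_skipped`; direct construction is shown only to
# illustrate the call flow):
#
#     scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
#     scheduler = AcceleratedScheduler(scheduler, optimizers=optimizer)
#     scheduler.step()  # steps `num_processes` times per call unless split_batches=True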
| 359
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 293
| 0
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    """Combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding,
            truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
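# Minimal usage sketch (checkpoint name and file path are illustrative assumptions):
#
#     from transformers import LayoutLMv3Processor
#     from PIL import Image
#
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#     image = Image.open("document.png").convert("RGB")
#     encoding = processor(image, return_tensors="pt")  # OCR words/boxes come from the image processor
#     # encoding -> input_ids, bbox, attention_mask, pixel_values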
| 360
|
import math


def proth(number: int) -> int:
    """Return the `number`-th Proth number (3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Block n holds the Proth numbers of the form k * 2**(n + 1) + 1 and is twice as
        # long as the previous block, so this is the index of the last block we need.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
| 293
| 0
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
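# Example invocation (paths and script name are illustrative):
#
#     python convert_bert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./bert_model.ckpt \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin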
| 361
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of the AutoencoderKL encoding method, holding the encoded latent distribution."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    """Variational Autoencoder (VAE) with KL loss for encoding images into latents and decoding them back."""

    _supports_gradient_checkpointing = True
@register_to_config
def __init__( self : Union[str, Any] , A : int = 3 , A : int = 3 , A : Tuple[str] = ("DownEncoderBlock2D",) , A : Tuple[str] = ("UpDecoderBlock2D",) , A : Tuple[int] = (64,) , A : int = 1 , A : str = "silu" , A : int = 4 , A : int = 32 , A : int = 32 , A : float = 0.1_8215 , ):
super().__init__()
# pass init params to Encoder
__snake_case: Any = Encoder(
in_channels=A , out_channels=A , down_block_types=A , block_out_channels=A , layers_per_block=A , act_fn=A , norm_num_groups=A , double_z=A , )
# pass init params to Decoder
__snake_case: int = Decoder(
in_channels=A , out_channels=A , up_block_types=A , block_out_channels=A , layers_per_block=A , norm_num_groups=A , act_fn=A , )
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
__snake_case: List[str] = False
__snake_case: Optional[int] = False
# only relevant if vae tiling is enabled
__snake_case: Any = self.config.sample_size
__snake_case: int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__snake_case: Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__snake_case: Optional[int] = 0.25
def UpperCAmelCase__ ( self : int , A : List[str] , A : Optional[Any]=False ):
if isinstance(A , (Encoder, Decoder) ):
__snake_case: str = value
def UpperCAmelCase__ ( self : str , A : bool = True ):
__snake_case: Union[str, Any] = use_tiling
def UpperCAmelCase__ ( self : Optional[int] ):
self.enable_tiling(A )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[str] = True
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[str] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Any = {}
def fn_recursive_add_processors(A : str , A : torch.nn.Module , A : Dict[str, AttentionProcessor] ):
if hasattr(A , """set_processor""" ):
__snake_case: List[Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , A , A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(A , A , A )
return processors
def UpperCAmelCase__ ( self : Optional[int] , A : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
__snake_case: Any = len(self.attn_processors.keys() )
if isinstance(A , A ) and len(A ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(A )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(A : str , A : torch.nn.Module , A : Optional[Any] ):
if hasattr(A , """set_processor""" ):
if not isinstance(A , A ):
module.set_processor(A )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , A , A )
for name, module in self.named_children():
fn_recursive_attn_processor(A , A , A )
def UpperCAmelCase__ ( self : List[str] ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def UpperCAmelCase__ ( self : Optional[Any] , A : torch.FloatTensor , A : bool = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(A , return_dict=A )
if self.use_slicing and x.shape[0] > 1:
__snake_case: List[Any] = [self.encoder(A ) for x_slice in x.split(1 )]
__snake_case: Optional[Any] = torch.cat(A )
else:
__snake_case: str = self.encoder(A )
__snake_case: Any = self.quant_conv(A )
__snake_case: Tuple = DiagonalGaussianDistribution(A )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=A )
def UpperCAmelCase__ ( self : Tuple , A : torch.FloatTensor , A : bool = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(A , return_dict=A )
__snake_case: Optional[int] = self.post_quant_conv(A )
__snake_case: Union[str, Any] = self.decoder(A )
if not return_dict:
return (dec,)
return DecoderOutput(sample=A )
@apply_forward_hook
def UpperCAmelCase__ ( self : Tuple , A : torch.FloatTensor , A : bool = True ):
if self.use_slicing and z.shape[0] > 1:
__snake_case: Union[str, Any] = [self._decode(A ).sample for z_slice in z.split(1 )]
__snake_case: List[str] = torch.cat(A )
else:
__snake_case: int = self._decode(A ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=A )
def UpperCAmelCase__ ( self : Any , A : Tuple , A : int , A : List[Any] ):
__snake_case: int = min(a.shape[2] , b.shape[2] , A )
for y in range(A ):
__snake_case: Dict = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def UpperCAmelCase__ ( self : Union[str, Any] , A : Optional[Any] , A : List[str] , A : List[str] ):
__snake_case: Dict = min(a.shape[3] , b.shape[3] , A )
for x in range(A ):
__snake_case: Tuple = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def UpperCAmelCase__ ( self : int , A : torch.FloatTensor , A : bool = True ):
__snake_case: List[str] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__snake_case: Dict = int(self.tile_latent_min_size * self.tile_overlap_factor )
__snake_case: Dict = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__snake_case: Optional[int] = []
for i in range(0 , x.shape[2] , A ):
__snake_case: Optional[int] = []
for j in range(0 , x.shape[3] , A ):
__snake_case: int = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__snake_case: Tuple = self.encoder(A )
__snake_case: Dict = self.quant_conv(A )
row.append(A )
rows.append(A )
__snake_case: Tuple = []
for i, row in enumerate(A ):
__snake_case: str = []
for j, tile in enumerate(A ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__snake_case: Optional[Any] = self.blend_v(rows[i - 1][j] , A , A )
if j > 0:
__snake_case: Tuple = self.blend_h(row[j - 1] , A , A )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(A , dim=3 ) )
__snake_case: Tuple = torch.cat(A , dim=2 )
__snake_case: Optional[int] = DiagonalGaussianDistribution(A )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=A )
def UpperCAmelCase__ ( self : Union[str, Any] , A : torch.FloatTensor , A : bool = True ):
__snake_case: Optional[Any] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__snake_case: str = int(self.tile_sample_min_size * self.tile_overlap_factor )
__snake_case: int = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__snake_case: List[Any] = []
for i in range(0 , z.shape[2] , A ):
__snake_case: Optional[Any] = []
for j in range(0 , z.shape[3] , A ):
__snake_case: Dict = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__snake_case: Any = self.post_quant_conv(A )
__snake_case: Optional[Any] = self.decoder(A )
row.append(A )
rows.append(A )
__snake_case: Optional[Any] = []
for i, row in enumerate(A ):
__snake_case: Optional[Any] = []
for j, tile in enumerate(A ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__snake_case: Tuple = self.blend_v(rows[i - 1][j] , A , A )
if j > 0:
__snake_case: List[str] = self.blend_h(row[j - 1] , A , A )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(A , dim=3 ) )
__snake_case: Dict = torch.cat(A , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=A )
def UpperCAmelCase__ ( self : List[Any] , A : torch.FloatTensor , A : bool = False , A : bool = True , A : Optional[torch.Generator] = None , ):
__snake_case: Optional[Any] = sample
__snake_case: Union[str, Any] = self.encode(A ).latent_dist
if sample_posterior:
__snake_case: Optional[Any] = posterior.sample(generator=A )
else:
__snake_case: Dict = posterior.mode()
__snake_case: Any = self.decode(A ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=A )
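# Sketch of the tiling scheme above: tiles are encoded/decoded independently and then
# cross-faded in their overlap with blend_v/blend_h. With blend_extent = 4, row y of the
# overlap mixes the previous tile `a` and current tile `b` with weights (1 - y/4) and y/4,
# so y=0 is 100% `a` and y=3 is mostly `b`, which hides the seams between tiles.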
| 293
| 0
| 362
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : Union[str, Any] = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    """Configuration class to store the configuration of a SEW-D model."""

    model_type = "sew-d"
def __init__( self : Dict , A : Any=32 , A : Dict=768 , A : Optional[Any]=12 , A : Union[str, Any]=12 , A : Union[str, Any]=3_072 , A : Optional[Any]=2 , A : Union[str, Any]=512 , A : List[Any]=256 , A : Dict=True , A : Union[str, Any]=True , A : Optional[int]=("p2c", "c2p") , A : str="layer_norm" , A : Dict="gelu_python" , A : Tuple=0.1 , A : Any=0.1 , A : Tuple=0.1 , A : Optional[int]=0.0 , A : Any=0.1 , A : Any=0.02 , A : Dict=1E-7 , A : str=1E-5 , A : int="group" , A : int="gelu" , A : str=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , A : Union[str, Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A : List[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A : Optional[int]=False , A : int=128 , A : int=16 , A : Optional[Any]=True , A : List[Any]=0.05 , A : Any=10 , A : Dict=2 , A : List[Any]=0.0 , A : Union[str, Any]=10 , A : int=0 , A : List[Any]="mean" , A : Union[str, Any]=False , A : Any=False , A : Optional[int]=256 , A : List[Any]=0 , A : Any=1 , A : List[Any]=2 , **A : List[Any] , ):
super().__init__(**A , pad_token_id=A , bos_token_id=A , eos_token_id=A )
__snake_case: Optional[int] = hidden_size
__snake_case: str = feat_extract_norm
__snake_case: int = feat_extract_activation
__snake_case: str = list(A )
__snake_case: Any = list(A )
__snake_case: str = list(A )
__snake_case: Union[str, Any] = conv_bias
__snake_case: int = num_conv_pos_embeddings
__snake_case: str = num_conv_pos_embedding_groups
__snake_case: List[Any] = len(self.conv_dim )
__snake_case: List[str] = num_hidden_layers
__snake_case: Union[str, Any] = intermediate_size
__snake_case: Dict = squeeze_factor
__snake_case: List[Any] = max_position_embeddings
__snake_case: List[Any] = position_buckets
__snake_case: List[str] = share_att_key
__snake_case: int = relative_attention
__snake_case: Union[str, Any] = norm_rel_ebd
__snake_case: List[str] = list(A )
__snake_case: Tuple = hidden_act
__snake_case: List[Any] = num_attention_heads
__snake_case: str = hidden_dropout
__snake_case: int = attention_dropout
__snake_case: Dict = activation_dropout
__snake_case: Any = feat_proj_dropout
__snake_case: int = final_dropout
__snake_case: List[Any] = layer_norm_eps
__snake_case: List[str] = feature_layer_norm_eps
__snake_case: List[Any] = initializer_range
__snake_case: List[Any] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case: List[Any] = apply_spec_augment
__snake_case: List[Any] = mask_time_prob
__snake_case: str = mask_time_length
__snake_case: List[str] = mask_time_min_masks
__snake_case: str = mask_feature_prob
__snake_case: Optional[int] = mask_feature_length
__snake_case: Dict = mask_feature_min_masks
# ctc loss
__snake_case: Any = ctc_loss_reduction
__snake_case: str = ctc_zero_infinity
# sequence classification
__snake_case: Optional[Any] = use_weighted_layer_sum
__snake_case: List[Any] = classifier_proj_size
@property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
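# With the default conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), this product is
# 5 * 2**6 = 320: the feature encoder emits one frame per 320 input samples (20 ms at 16 kHz).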
| 293
| 0
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads the dataset metadata from its dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads the dataset metadata from a YAML string."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
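# Round-trip sketch: from_yaml_string("train-eval-index: []") stores the key as
# `train_eval_index` (dashes mapped to underscores for fields in _FIELDS_WITH_DASHES),
# and to_yaml_string() maps it back to `train-eval-index` when dumping.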
__UpperCAmelCase : Optional[Any] = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__UpperCAmelCase : Tuple = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
__UpperCAmelCase : Union[str, Any] = ap.parse_args()
__UpperCAmelCase : Tuple = Path(args.readme_filepath)
__UpperCAmelCase : Any = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 363
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
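# For example, floats_list((2, 3)) returns a 2x3 nested list of random floats in [0, scale),
# drawn from `global_rng` unless an explicit `rng` is passed, which keeps tests reproducible.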
class TvltFeatureExtractionTester(unittest.TestCase):
def __init__( self : int , A : List[str] , A : List[Any]=7 , A : Optional[int]=400 , A : List[Any]=2_000 , A : Dict=2_048 , A : Tuple=128 , A : List[Any]=1 , A : Tuple=512 , A : str=30 , A : Optional[Any]=44_100 , ):
__snake_case: Dict = parent
__snake_case: Optional[Any] = batch_size
__snake_case: Optional[int] = min_seq_length
__snake_case: Optional[Any] = max_seq_length
__snake_case: List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__snake_case: Any = spectrogram_length
__snake_case: Any = feature_size
__snake_case: Union[str, Any] = num_audio_channels
__snake_case: Any = hop_length
__snake_case: List[str] = chunk_length
__snake_case: Any = sampling_rate
def UpperCAmelCase__ ( self : List[Any] ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCAmelCase__ ( self : List[str] , A : str=False , A : int=False ):
def _flatten(A : Dict ):
return list(itertools.chain(*A ) )
if equal_length:
__snake_case: List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__snake_case: int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__snake_case: Tuple = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: str = TvltFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : int ):
__snake_case: Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A , """spectrogram_length""" ) )
self.assertTrue(hasattr(A , """feature_size""" ) )
self.assertTrue(hasattr(A , """num_audio_channels""" ) )
self.assertTrue(hasattr(A , """hop_length""" ) )
self.assertTrue(hasattr(A , """chunk_length""" ) )
self.assertTrue(hasattr(A , """sampling_rate""" ) )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: Tuple = feat_extract_first.save_pretrained(A )[0]
check_json_file_has_correct_format(A )
__snake_case: int = self.feature_extraction_class.from_pretrained(A )
__snake_case: List[str] = feat_extract_first.to_dict()
__snake_case: str = feat_extract_second.to_dict()
__snake_case: List[Any] = dict_first.pop("""mel_filters""" )
__snake_case: str = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: str = os.path.join(A , """feat_extract.json""" )
feat_extract_first.to_json_file(A )
__snake_case: List[Any] = self.feature_extraction_class.from_json_file(A )
__snake_case: Dict = feat_extract_first.to_dict()
__snake_case: Any = feat_extract_second.to_dict()
__snake_case: int = dict_first.pop("""mel_filters""" )
__snake_case: int = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : Any ):
# Initialize feature_extractor
__snake_case: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__snake_case: Dict = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__snake_case: str = [np.asarray(A ) for speech_input in speech_inputs]
# Test not batched input
__snake_case: int = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__snake_case: Optional[int] = feature_extractor(A , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__snake_case: Union[str, Any] = feature_extractor(
A , return_tensors="""np""" , sampling_rate=44_100 , mask_audio=A ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__snake_case: Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__snake_case: Union[str, Any] = np.asarray(A )
__snake_case: List[Any] = feature_extractor(A , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def UpperCAmelCase__ ( self : Union[str, Any] , A : List[str] ):
__snake_case: Tuple = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__snake_case: List[Any] = ds.sort("""id""" ).select(range(A ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = self._load_datasamples(1 )
__snake_case: Optional[int] = TvltFeatureExtractor()
__snake_case: Optional[Any] = feature_extractor(A , return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
__snake_case: str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , A , atol=1E-4 ) )
| 293
| 0
|
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 364
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    # Text-to-image pipeline that re-uses the noise of a smaller reference latent so the same
    # seed produces similar images at different resolutions (class name assumed from this
    # seed-resize behaviour).
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
            scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)
@torch.no_grad()
def __call__( self : List[str] , A : Union[str, List[str]] , A : int = 512 , A : int = 512 , A : int = 50 , A : float = 7.5 , A : Optional[Union[str, List[str]]] = None , A : Optional[int] = 1 , A : float = 0.0 , A : Optional[torch.Generator] = None , A : Optional[torch.FloatTensor] = None , A : Optional[str] = "pil" , A : bool = True , A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A : int = 1 , A : Optional[torch.FloatTensor] = None , **A : Optional[Any] , ):
if isinstance(A , A ):
__snake_case: int = 1
elif isinstance(A , A ):
__snake_case: Optional[Any] = len(A )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(A )}.''' )
# get prompt text embeddings
__snake_case: Tuple = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case: Any = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case: List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case: Dict = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__snake_case: Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case: List[Any] = text_embeddings.shape
__snake_case: Tuple = text_embeddings.repeat(1 , A , 1 )
__snake_case: Dict = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case: List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
            uncond_tokens: List[str]
if negative_prompt is None:
__snake_case: Any = [""""""]
elif type(A ) is not type(A ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !='''
f''' {type(A )}.''' )
elif isinstance(A , A ):
__snake_case: List[str] = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case: str = negative_prompt
__snake_case: Any = text_input_ids.shape[-1]
__snake_case: Dict = self.tokenizer(
A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , )
__snake_case: Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case: Optional[Any] = uncond_embeddings.shape[1]
__snake_case: str = uncond_embeddings.repeat(A , A , 1 )
__snake_case: List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case: Any = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case: Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case: List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__snake_case: Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case: Any = torch.randn(
A , generator=A , device="""cpu""" , dtype=A ).to(self.device )
__snake_case: Tuple = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
__snake_case: Dict = torch.randn(
A , generator=A , device=self.device , dtype=A )
__snake_case: Optional[int] = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case: Optional[int] = latents_reference.to(self.device )
__snake_case: List[str] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__snake_case: int = (latents_shape[3] - latents_shape_reference[3]) // 2
__snake_case: Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
__snake_case: int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__snake_case: Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__snake_case: List[Any] = 0 if dx < 0 else dx
__snake_case: Dict = 0 if dy < 0 else dy
__snake_case: List[str] = max(-dx , 0 )
__snake_case: int = max(-dy , 0 )
# import pdb
# pdb.set_trace()
__snake_case: List[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case: str = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case: Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case: Optional[int] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case: int = {}
if accepts_eta:
__snake_case: Optional[Any] = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
__snake_case: str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case: Dict = self.scheduler.scale_model_input(A , A )
# predict the noise residual
__snake_case: List[Any] = self.unet(A , A , encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case: Any = noise_pred.chunk(2 )
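# classifier-free guidance update: eps = eps_uncond + guidance_scale * (eps_text - eps_uncond)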
__snake_case: Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case: str = self.scheduler.step(A , A , A , **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
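# scale latents back from the diffusion latent space before VAE decoding;
# 0.18215 is the standard Stable Diffusion VAE scaling factor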
__snake_case: Optional[int] = 1 / 0.18215 * latents
__snake_case: List[Any] = self.vae.decode(A ).sample
__snake_case: str = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case: Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__snake_case: List[Any] = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to(
self.device )
__snake_case , __snake_case: List[str] = self.safety_checker(
images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__snake_case: Optional[int] = None
if output_type == "pil":
__snake_case: Tuple = self.numpy_to_pil(A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 293
| 0
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__UpperCAmelCase : Dict = logging.get_logger(__name__)
__UpperCAmelCase : Optional[int] = {"vocab_file": "spiece.model"}
__UpperCAmelCase : Optional[int] = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : List[Any] , A : int , A : int=False , A : Dict=True , A : Tuple=False , A : Dict="<s>" , A : str="</s>" , A : Optional[int]="<unk>" , A : int="<sep>" , A : Optional[Any]="<pad>" , A : List[str]="<cls>" , A : Dict="<mask>" , A : Optional[Any]=["<eop>", "<eod>"] , A : Optional[Dict[str, Any]] = None , **A : Optional[Any] , ):
__snake_case: Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
__snake_case: str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , additional_special_tokens=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
__snake_case: Optional[Any] = 3
__snake_case: Tuple = do_lower_case
__snake_case: Dict = remove_space
__snake_case: Dict = keep_accents
__snake_case: Optional[Any] = vocab_file
__snake_case: Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
__snake_case: Any = jieba
__snake_case: List[str] = str.maketrans(""" \n""" , """\u2582\u2583""" )
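# CPM maps spaces and newlines to the placeholder characters \u2582 and \u2583
# before SentencePiece tokenization; the _decode override at the end of this
# class reverses the mapping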
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def UpperCAmelCase__ ( self : Optional[Any] ):
return len(self.sp_model )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ):
__snake_case: Any = self.__dict__.copy()
__snake_case: Any = None
return state
def __setstate__( self : Optional[Any] , A : Tuple ):
__snake_case: List[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__snake_case: Any = {}
__snake_case: Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__ ( self : Any , A : List[Any] ):
if self.remove_space:
__snake_case: Any = """ """.join(inputs.strip().split() )
else:
__snake_case: int = inputs
__snake_case: List[str] = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
__snake_case: List[str] = unicodedata.normalize("""NFKD""" , A )
__snake_case: Dict = """""".join([c for c in outputs if not unicodedata.combining(A )] )
if self.do_lower_case:
__snake_case: Tuple = outputs.lower()
return outputs
def UpperCAmelCase__ ( self : List[Any] , A : str ):
__snake_case: Any = self.preprocess_text(A )
__snake_case: Tuple = self.sp_model.encode(A , out_type=A )
__snake_case: List[Any] = []
for piece in pieces:
if len(A ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
__snake_case: Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__snake_case: Optional[Any] = cur_pieces[1:]
else:
__snake_case: int = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(A )
else:
new_pieces.append(A )
return new_pieces
def UpperCAmelCase__ ( self : Tuple , A : Optional[Any] ):
return self.sp_model.PieceToId(A )
def UpperCAmelCase__ ( self : str , A : Optional[int] ):
return self.sp_model.IdToPiece(A )
def UpperCAmelCase__ ( self : Optional[Any] , A : str ):
__snake_case: Any = """""".join(A ).replace(A , """ """ ).strip()
return out_string
def UpperCAmelCase__ ( self : Tuple , A : List[int] , A : Optional[List[int]] = None ):
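# XLNet/CPM input format: ``X <sep> <cls>`` for a single sequence and
# ``A <sep> B <sep> <cls>`` for a pair; the classifier token goes at the end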
__snake_case: List[str] = [self.sep_token_id]
__snake_case: Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase__ ( self : Any , A : List[int] , A : Optional[List[int]] = None , A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is not None:
return ([0] * len(A )) + [1] + ([0] * len(A )) + [1, 1]
return ([0] * len(A )) + [1, 1]
def UpperCAmelCase__ ( self : Optional[Any] , A : List[int] , A : Optional[List[int]] = None ):
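# XLNet-style segment ids: tokens of the first sequence get 0, the second gets 1,
# and the trailing <cls> token gets its own segment id (2)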
__snake_case: Dict = [self.sep_token_id]
__snake_case: Optional[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCAmelCase__ ( self : List[Any] , A : str , A : Optional[str] = None ):
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__snake_case: List[str] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
__snake_case: Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
def UpperCAmelCase__ ( self : int , *A : List[str] , **A : str ):
__snake_case: Tuple = super()._decode(*A , **A )
__snake_case: Union[str, Any] = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 365
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__UpperCAmelCase : Optional[int] = "\\n\n"
__UpperCAmelCase : Tuple = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
__UpperCAmelCase : Tuple = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
def UpperCAmelCase__ ( self : int , A : str , A : Optional[Any] , A : int = 16 , A : bool = True , A : Optional[int]=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
__snake_case: Optional[Any] = """cuda"""
else:
__snake_case: str = """cuda""" if torch.cuda.is_available() else """cpu"""
__snake_case: Dict = AutoModelForCausalLM.from_pretrained(A )
__snake_case: List[str] = model.to(A )
__snake_case: Optional[Any] = AutoTokenizer.from_pretrained(A )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__snake_case: Dict = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(A ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__snake_case: Tuple = model.config.max_length - 1
else:
__snake_case: Optional[Any] = model.config.max_length
__snake_case: Optional[int] = tokenizer(
A , add_special_tokens=A , padding=A , truncation=A , max_length=A , return_tensors="""pt""" , return_attention_mask=A , ).to(A )
__snake_case: Tuple = encodings["""input_ids"""]
__snake_case: Any = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__snake_case: Optional[int] = []
__snake_case: Optional[int] = CrossEntropyLoss(reduction="""none""" )
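# per-token negative log-likelihoods are masked with the shifted attention mask,
# averaged per sequence, and exponentiated: ppl = exp(sum(nll * mask) / sum(mask))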
for start_index in logging.tqdm(range(0 , len(A ) , A ) ):
__snake_case: Dict = min(start_index + batch_size , len(A ) )
__snake_case: Optional[int] = encoded_texts[start_index:end_index]
__snake_case: List[Any] = attn_masks[start_index:end_index]
if add_start_token:
__snake_case: Tuple = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(A )
__snake_case: Optional[Any] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
__snake_case: Union[str, Any] = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(A ), attn_mask] , dim=1 )
__snake_case: List[str] = encoded_batch
with torch.no_grad():
__snake_case: Union[str, Any] = model(A , attention_mask=A ).logits
__snake_case: List[str] = out_logits[..., :-1, :].contiguous()
__snake_case: Optional[Any] = labels[..., 1:].contiguous()
__snake_case: Dict = attn_mask[..., 1:].contiguous()
__snake_case: Optional[Any] = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , A ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(A )}
| 293
| 0
|
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs) -> np.ndarray:
return 1.0 / (1.0 + np.exp(-_outputs))
def softmax(_outputs) -> np.ndarray:
# subtract the row-wise max before exponentiating for numerical stability
maxes = np.max(_outputs , axis=-1 , keepdims=True)
shifted_exp = np.exp(_outputs - maxes)
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True)
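# quick sanity check: softmax(np.array([[0.0, 0.0]])) -> array([[0.5, 0.5]])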
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = """sigmoid"""
lowerCAmelCase__ = """softmax"""
lowerCAmelCase__ = """none"""
@add_end_docstrings(
__lowerCamelCase , R"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = False
lowerCAmelCase__ = ClassificationFunction.NONE
def __init__( self : str , **A : str ):
super().__init__(**A )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def UpperCAmelCase__ ( self : Union[str, Any] , A : Any=None , A : Union[str, Any]=None , A : Union[str, Any]="" , **A : Optional[int] ):
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
__snake_case: str = tokenizer_kwargs
__snake_case: List[str] = {}
if hasattr(self.model.config , """return_all_scores""" ) and return_all_scores is None:
__snake_case: int = self.model.config.return_all_scores
if isinstance(A , A ) or top_k is None:
__snake_case: Tuple = top_k
__snake_case: Tuple = False
elif return_all_scores is not None:
warnings.warn(
"""`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"""
""" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.""" , A , )
if return_all_scores:
__snake_case: Optional[int] = None
else:
__snake_case: int = 1
if isinstance(A , A ):
__snake_case: Optional[int] = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
__snake_case: Tuple = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self : Union[str, Any] , *A : str , **A : List[Any] ):
__snake_case: Any = super().__call__(*A , **A )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
__snake_case: int = """top_k""" not in kwargs
if isinstance(args[0] , A ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def UpperCAmelCase__ ( self : Optional[int] , A : Optional[int] , **A : Dict ):
__snake_case: Dict = self.framework
if isinstance(A , A ):
return self.tokenizer(**A , return_tensors=A , **A )
elif isinstance(A , A ) and len(A ) == 1 and isinstance(inputs[0] , A ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=A , **A )
elif isinstance(A , A ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"""The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"""
""" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.""" )
return self.tokenizer(A , return_tensors=A , **A )
def UpperCAmelCase__ ( self : Optional[int] , A : Optional[Any] ):
return self.model(**A )
def UpperCAmelCase__ ( self : int , A : Tuple , A : Tuple=None , A : Dict=1 , A : Optional[Any]=True ):
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
__snake_case: Dict = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
__snake_case: List[Any] = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , """function_to_apply""" ) and function_to_apply is None:
__snake_case: int = self.model.config.function_to_apply
else:
__snake_case: int = ClassificationFunction.NONE
__snake_case: str = model_outputs["""logits"""][0]
__snake_case: Optional[int] = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
__snake_case: int = sigmoid(A )
elif function_to_apply == ClassificationFunction.SOFTMAX:
__snake_case: List[str] = softmax(A )
elif function_to_apply == ClassificationFunction.NONE:
__snake_case: Dict = outputs
else:
raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''' )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
__snake_case: Optional[Any] = [
{"""label""": self.model.config.idalabel[i], """score""": score.item()} for i, score in enumerate(A )
]
if not _legacy:
dict_scores.sort(key=lambda A : x["score"] , reverse=A )
if top_k is not None:
__snake_case: int = dict_scores[:top_k]
return dict_scores
| 366
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase : List[str] = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[Any] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Tuple = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[int] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 293
| 0
|
from math import pi, sqrt
def gamma(num : float) -> float:
if num <= 0:
raise ValueError("""math domain error""")
if num > 171.5:
raise OverflowError("""math range error""")
elif num - int(num) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""")
elif num == 0.5:
return sqrt(pi)
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
assert gamma(0.5) == sqrt(pi)
assert gamma(1) == 1.0
assert gamma(2) == 1.0
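# worked example of the half-integer recurrence:
# gamma(3.5) = 2.5 * 1.5 * 0.5 * gamma(0.5) = 1.875 * sqrt(pi) ~= 3.3234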
if __name__ == "__main__":
from doctest import testmod
testmod()
num = 1.0
while num:
num = float(input("Gamma of: "))
print(f'gamma({num}) = {gamma(num)}')
print("\nEnter 0 to exit...")
| 367
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester ( ConfigTester ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(A , """neck_hidden_sizes""" ) )
self.parent.assertTrue(hasattr(A , """num_attention_heads""" ) )
class MobileViTModelTester :
'''simple docstring'''
def __init__( self : int , A : str , A : Dict=13 , A : str=32 , A : Any=2 , A : Optional[Any]=3 , A : str=640 , A : Tuple=4 , A : Dict="silu" , A : List[Any]=3 , A : Any=32 , A : Any=0.1 , A : int=0.1 , A : Dict=0.1 , A : Optional[Any]=0.02 , A : List[Any]=True , A : Tuple=True , A : Any=10 , A : Optional[int]=None , ):
__snake_case: List[Any] = parent
__snake_case: Dict = batch_size
__snake_case: int = image_size
__snake_case: Tuple = patch_size
__snake_case: Tuple = num_channels
__snake_case: str = last_hidden_size
__snake_case: Dict = num_attention_heads
__snake_case: Dict = hidden_act
__snake_case: Tuple = conv_kernel_size
__snake_case: List[str] = output_stride
__snake_case: List[str] = hidden_dropout_prob
__snake_case: Optional[Any] = attention_probs_dropout_prob
__snake_case: int = classifier_dropout_prob
__snake_case: List[Any] = use_labels
__snake_case: Union[str, Any] = is_training
__snake_case: Union[str, Any] = num_labels
__snake_case: str = initializer_range
__snake_case: List[Any] = scope
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case: Tuple = None
__snake_case: Any = None
if self.use_labels:
__snake_case: Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case: str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case: Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase__ ( self : int ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : str , A : Optional[Any] , A : Any , A : Any , A : Union[str, Any] ):
__snake_case: List[Any] = MobileViTModel(config=A )
model.to(A )
model.eval()
__snake_case: int = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : str , A : List[Any] , A : Any , A : Any , A : int ):
__snake_case: str = self.num_labels
__snake_case: Optional[int] = MobileViTForImageClassification(A )
model.to(A )
model.eval()
__snake_case: Union[str, Any] = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[int] , A : str , A : Optional[Any] , A : int , A : str ):
__snake_case: List[Any] = self.num_labels
__snake_case: Dict = MobileViTForSemanticSegmentation(A )
model.to(A )
model.eval()
__snake_case: Union[str, Any] = model(A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case: Tuple = model(A , labels=A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Tuple = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case: Any = config_and_inputs
__snake_case: Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[Any] = MobileViTModelTester(self )
__snake_case: str = MobileViTConfigTester(self , config_class=A , has_text_modality=A )
def UpperCAmelCase__ ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : List[Any] ):
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def UpperCAmelCase__ ( self : Dict ):
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def UpperCAmelCase__ ( self : Optional[Any] ):
pass
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case: Optional[Any] = model_class(A )
__snake_case: int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case: Optional[int] = [*signature.parameters.keys()]
__snake_case: List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase__ ( self : Dict ):
def check_hidden_states_output(A : List[Any] , A : int , A : Tuple ):
__snake_case: List[str] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
__snake_case: str = model(**self._prepare_for_class(A , A ) )
__snake_case: Optional[int] = outputs.hidden_states
__snake_case: Any = 5
self.assertEqual(len(A ) , A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case: Union[str, Any] = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case: Optional[Any] = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case: Dict = True
check_hidden_states_output(A , A , A )
def UpperCAmelCase__ ( self : int ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case: List[Any] = MobileViTModel.from_pretrained(A )
self.assertIsNotNone(A )
def A__ ( ) -> Optional[int]:
__snake_case: Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase__ ( self : Dict ):
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Tuple = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(A )
__snake_case: str = self.default_image_processor
__snake_case: Optional[Any] = prepare_img()
__snake_case: List[Any] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: Dict = model(**A )
# verify the logits
__snake_case: List[str] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , A )
__snake_case: Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[str] = model.to(A )
__snake_case: Dict = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[Any] = prepare_img()
__snake_case: List[str] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: List[Any] = model(**A )
__snake_case: Optional[int] = outputs.logits
# verify the logits
__snake_case: Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , A )
__snake_case: Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Dict ):
__snake_case: int = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: str = model.to(A )
__snake_case: Optional[Any] = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[str] = prepare_img()
__snake_case: Optional[int] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: Dict = model(**A )
__snake_case: List[Any] = outputs.logits.detach().cpu()
__snake_case: List[str] = image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(50, 60)] )
__snake_case: str = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , A )
__snake_case: int = image_processor.post_process_semantic_segmentation(outputs=A )
__snake_case: Tuple = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , A )
| 293
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Any = logging.get_logger(__name__)
__UpperCAmelCase : Union[str, Any] = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = """realm"""
def __init__( self : List[Any] , A : Tuple=30_522 , A : Union[str, Any]=768 , A : List[Any]=128 , A : Union[str, Any]=12 , A : str=12 , A : Union[str, Any]=8 , A : Optional[Any]=3_072 , A : Dict="gelu_new" , A : int=0.1 , A : List[Any]=0.1 , A : Union[str, Any]=512 , A : List[Any]=2 , A : Tuple=0.02 , A : List[Any]=1E-12 , A : List[str]=256 , A : Optional[Any]=10 , A : Union[str, Any]=1E-3 , A : List[str]=5 , A : int=320 , A : List[str]=13_353_718 , A : str=5_000 , A : List[Any]=1 , A : Union[str, Any]=0 , A : Union[str, Any]=2 , **A : Dict , ):
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
# Common config
__snake_case: Optional[Any] = vocab_size
__snake_case: int = max_position_embeddings
__snake_case: List[Any] = hidden_size
__snake_case: Optional[int] = retriever_proj_size
__snake_case: Any = num_hidden_layers
__snake_case: Optional[Any] = num_attention_heads
__snake_case: List[Any] = num_candidates
__snake_case: List[str] = intermediate_size
__snake_case: Tuple = hidden_act
__snake_case: List[Any] = hidden_dropout_prob
__snake_case: Union[str, Any] = attention_probs_dropout_prob
__snake_case: Any = initializer_range
__snake_case: List[str] = type_vocab_size
__snake_case: str = layer_norm_eps
# Reader config
__snake_case: int = span_hidden_size
__snake_case: int = max_span_width
__snake_case: Union[str, Any] = reader_layer_norm_eps
__snake_case: Optional[Any] = reader_beam_size
__snake_case: str = reader_seq_len
# Retrieval config
__snake_case: str = num_block_records
__snake_case: Dict = searcher_beam_size
| 368
|
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownBlock2D # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: str = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetDownsampleBlock2D # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Union[str, Any] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownBlock2D # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: Union[str, Any] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnDownBlock2D # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : List[str] ):
__snake_case , __snake_case: List[str] = super().prepare_init_args_and_inputs_for_common()
__snake_case: List[Any] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnDownBlock2D # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : int ):
__snake_case , __snake_case: Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Optional[Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipDownBlock2D # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Any ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipDownBlock2D # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[Any] ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : int ):
__snake_case: str = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownEncoderBlock2D # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: str = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownEncoderBlock2D # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[str] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Tuple = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlock2D # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
__snake_case: List[str] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Tuple = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlock2DCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: int = super().prepare_init_args_and_inputs_for_common()
__snake_case: int = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlock2DSimpleCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpBlock2D # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetUpsampleBlock2D # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnUpBlock2D # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[int] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[Any] = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnUpBlock2D # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A , include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Optional[Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpBlock2D # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : int ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Optional[Any] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipUpBlock2D # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipUpBlock2D # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpDecoderBlock2D # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : str ):
__snake_case: Union[str, Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpDecoderBlock2D # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Any = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : int ):
__snake_case: Any = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(A )
| 293
| 0
|
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class Image:
'''simple docstring'''
@staticmethod
def open( *A : List[Any] , **A : Union[str, Any] ):
pass
def hashimage(image: Image) -> str:
m = hashlib.md5(image.tobytes())
return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image) -> Dict:
npimg = np.array(mask)
shape = npimg.shape
return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
lowerCAmelCase__ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def UpperCAmelCase__ ( self : List[Any] , A : Union[str, Any] , A : str , A : Optional[int] ):
__snake_case: List[str] = MaskGenerationPipeline(model=A , image_processor=A )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCAmelCase__ ( self : List[Any] , A : Optional[Any] , A : str ):
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def UpperCAmelCase__ ( self : str ):
pass
@slow
@require_torch
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
__snake_case: Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 )
# Shortening by hashing
new_output: list = []
for i, o in enumerate(outputs["""masks"""] ):
new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(new_output , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871}
] , )
# fmt: on
@require_torch
@slow
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Union[str, Any] = """facebook/sam-vit-huge"""
__snake_case: str = pipeline("""mask-generation""" , model=A )
__snake_case: int = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
new_output: list = []
for i, o in enumerate(outputs["""masks"""] ):
new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(new_output , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
] , )
| 369
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase__ ( self : Dict ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = ort.SessionOptions()
__snake_case: List[Any] = False
return options
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__snake_case: Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__snake_case: List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
__snake_case: int = """A red cat sitting on a park bench"""
__snake_case: Any = np.random.RandomState(0 )
__snake_case: Optional[Any] = pipe(
prompt=A , image=A , mask_image=A , guidance_scale=7.5 , num_inference_steps=10 , generator=A , output_type="""np""" , )
__snake_case: List[Any] = output.images
__snake_case: str = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__snake_case: Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__snake_case: Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__snake_case: Optional[int] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
__snake_case: List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=A , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
__snake_case: Optional[int] = """A red cat sitting on a park bench"""
__snake_case: Dict = np.random.RandomState(0 )
__snake_case: Optional[Any] = pipe(
prompt=A , image=A , mask_image=A , guidance_scale=7.5 , num_inference_steps=20 , generator=A , output_type="""np""" , )
__snake_case: List[str] = output.images
__snake_case: str = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__snake_case: Union[str, Any] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 293
| 0
|
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__UpperCAmelCase : Union[str, Any] = TypeVar("KEY")
__UpperCAmelCase : List[Any] = TypeVar("VAL")
@dataclass(frozen=__lowerCamelCase , slots=__lowerCamelCase )
class __snake_case ( Generic[KEY, VAL] ):
'''simple docstring'''
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
class __snake_case ( _Item ):
'''simple docstring'''
def __init__( self : Any ):
super().__init__(A , A )
def __bool__( self : int ):
return False
__UpperCAmelCase : List[str] = _DeletedItem()
class __snake_case ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self : List[str] , A : int = 8 , A : float = 0.75 ):
__snake_case: Tuple = initial_block_size
__snake_case: list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__snake_case: Tuple = capacity_factor
__snake_case: List[str] = 0
def UpperCAmelCase__ ( self : Optional[int] , A : KEY ):
return hash(A ) % len(self._buckets )
def UpperCAmelCase__ ( self : Any , A : int ):
return (ind + 1) % len(self._buckets )
def UpperCAmelCase__ ( self : Optional[Any] , A : int , A : KEY , A : VAL ):
__snake_case: Union[str, Any] = self._buckets[ind]
if not stored:
__snake_case: Optional[Any] = _Item(A , A )
self._len += 1
return True
elif stored.key == key:
__snake_case: List[Any] = _Item(A , A )
return True
else:
return False
def UpperCAmelCase__ ( self : str ):
__snake_case: Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(A )
def UpperCAmelCase__ ( self : str ):
if len(self._buckets ) <= self._initial_block_size:
return False
__snake_case: int = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def UpperCAmelCase__ ( self : Optional[Any] , A : int ):
__snake_case: Tuple = self._buckets
__snake_case: Any = [None] * new_size
__snake_case: Any = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def UpperCAmelCase__ ( self : Optional[int] ):
self._resize(len(self._buckets ) * 2 )
def UpperCAmelCase__ ( self : int ):
self._resize(len(self._buckets ) // 2 )
def UpperCAmelCase__ ( self : int , A : KEY ):
__snake_case: List[Any] = self._get_bucket_index(A )
for _ in range(len(self._buckets ) ):
yield ind
__snake_case: List[Any] = self._get_next_ind(A )
def UpperCAmelCase__ ( self : Optional[int] , A : KEY , A : VAL ):
for ind in self._iterate_buckets(A ):
if self._try_set(A , A , A ):
break
def __setitem__( self : Union[str, Any] , A : KEY , A : VAL ):
if self._is_full():
self._size_up()
self._add_item(A , A )
def __delitem__( self : Optional[int] , A : KEY ):
for ind in self._iterate_buckets(A ):
__snake_case: Union[str, Any] = self._buckets[ind]
if item is None:
raise KeyError(A )
if item is _deleted:
continue
if item.key == key:
__snake_case: int = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Optional[int] , A : KEY ):
for ind in self._iterate_buckets(A ):
__snake_case: int = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(A )
def __len__( self : str ):
return self._len
def __iter__( self : Tuple ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : Tuple ):
__snake_case: Any = """ ,""".join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
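# A minimal usage sketch of the open-addressing hash map defined above. This
# dump's renaming pass has replaced the class and parameter names (upstream
# the class is called HashMap), so the snippet describes the intended behavior
# of the original, runnable source rather than this transformed copy:
#
#   hm = HashMap()        # 8 buckets by default, capacity factor 0.75
#   hm["a"] = 1
#   hm["b"] = 2
#   hm["a"] = 3           # same key: value overwritten in place, len stays 2
#   del hm["b"]           # slot becomes the _deleted sentinel; probes skip it
#   assert (len(hm), hm["a"]) == (1, 3)
#
# Collisions are resolved by linear probing (_get_next_ind), and the table
# doubles via _size_up once len(self) >= _capacity_factor * len(_buckets).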
| 370
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def A__ ( SCREAMING_SNAKE_CASE__ = 3) -> qiskit.result.counts.Counts:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
raise TypeError("""number of qubits must be an integer.""")
if number_of_qubits <= 0:
raise ValueError("""number of qubits must be > 0.""")
if math.floor(SCREAMING_SNAKE_CASE__) != number_of_qubits:
raise ValueError("""number of qubits must be exact integer.""")
if number_of_qubits > 10:
raise ValueError("""number of qubits too large to simulate(>10).""")
__snake_case: int = QuantumRegister(SCREAMING_SNAKE_CASE__ , """qr""")
__snake_case: List[str] = ClassicalRegister(SCREAMING_SNAKE_CASE__ , """cr""")
__snake_case: Optional[Any] = QuantumCircuit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
__snake_case: Tuple = number_of_qubits
for i in range(SCREAMING_SNAKE_CASE__):
quantum_circuit.h(number_of_qubits - i - 1)
counter -= 1
for j in range(SCREAMING_SNAKE_CASE__):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
for k in range(number_of_qubits // 2):
quantum_circuit.swap(SCREAMING_SNAKE_CASE__ , number_of_qubits - k - 1)
# measure all the qubits
quantum_circuit.measure(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# simulate with 10000 shots
__snake_case: Union[str, Any] = Aer.get_backend("""qasm_simulator""")
__snake_case: Optional[Any] = execute(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , shots=1_0000)
return job.result().get_counts(SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
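# Sanity check for the default call above (hedged: this dump renames the
# definition to A__; upstream the function is quantum_fourier_transform).
# The circuit acts on the all-zeros input state, whose QFT is a uniform
# superposition, so the 10_000 shots should land roughly evenly on all
# 2**3 = 8 bitstrings (~1_250 counts each):
#
#   counts = quantum_fourier_transform(3)
#   assert sum(counts.values()) == 10_000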
| 293
| 0
|
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> int:
def get_masked_lm_array(SCREAMING_SNAKE_CASE__):
__snake_case: Optional[int] = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
__snake_case: Dict = tf.train.load_variable(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if "kernel" in name:
__snake_case: Any = array.transpose()
return torch.from_numpy(SCREAMING_SNAKE_CASE__)
def get_encoder_array(SCREAMING_SNAKE_CASE__):
__snake_case: List[str] = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
__snake_case: List[str] = tf.train.load_variable(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if "kernel" in name:
__snake_case: str = array.transpose()
return torch.from_numpy(SCREAMING_SNAKE_CASE__)
def get_encoder_layer_array(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
__snake_case: List[str] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
__snake_case: Tuple = tf.train.load_variable(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if "kernel" in name:
__snake_case: Any = array.transpose()
return torch.from_numpy(SCREAMING_SNAKE_CASE__)
def get_encoder_attention_layer_array(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
__snake_case: str = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
__snake_case: Any = tf.train.load_variable(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
__snake_case: str = array.reshape(SCREAMING_SNAKE_CASE__)
if "kernel" in name:
__snake_case: Union[str, Any] = array.transpose()
return torch.from_numpy(SCREAMING_SNAKE_CASE__)
print(F'''Loading model based on config from {config_path}...''')
__snake_case: List[str] = BertConfig.from_json_file(SCREAMING_SNAKE_CASE__)
__snake_case: Union[str, Any] = BertForMaskedLM(SCREAMING_SNAKE_CASE__)
# Layers
for layer_index in range(0 , config.num_hidden_layers):
__snake_case: BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
__snake_case: BertSelfAttention = layer.attention.self
__snake_case: Union[str, Any] = get_encoder_attention_layer_array(
SCREAMING_SNAKE_CASE__ , """_query_dense/kernel""" , self_attn.query.weight.data.shape)
__snake_case: Optional[int] = get_encoder_attention_layer_array(
SCREAMING_SNAKE_CASE__ , """_query_dense/bias""" , self_attn.query.bias.data.shape)
__snake_case: Any = get_encoder_attention_layer_array(
SCREAMING_SNAKE_CASE__ , """_key_dense/kernel""" , self_attn.key.weight.data.shape)
__snake_case: List[str] = get_encoder_attention_layer_array(
SCREAMING_SNAKE_CASE__ , """_key_dense/bias""" , self_attn.key.bias.data.shape)
__snake_case: Union[str, Any] = get_encoder_attention_layer_array(
SCREAMING_SNAKE_CASE__ , """_value_dense/kernel""" , self_attn.value.weight.data.shape)
__snake_case: Optional[Any] = get_encoder_attention_layer_array(
SCREAMING_SNAKE_CASE__ , """_value_dense/bias""" , self_attn.value.bias.data.shape)
# Self-attention Output
__snake_case: BertSelfOutput = layer.attention.output
__snake_case: str = get_encoder_attention_layer_array(
SCREAMING_SNAKE_CASE__ , """_output_dense/kernel""" , self_output.dense.weight.data.shape)
__snake_case: List[Any] = get_encoder_attention_layer_array(
SCREAMING_SNAKE_CASE__ , """_output_dense/bias""" , self_output.dense.bias.data.shape)
__snake_case: Any = get_encoder_layer_array(SCREAMING_SNAKE_CASE__ , """_attention_layer_norm/gamma""")
__snake_case: str = get_encoder_layer_array(SCREAMING_SNAKE_CASE__ , """_attention_layer_norm/beta""")
# Intermediate
__snake_case: BertIntermediate = layer.intermediate
__snake_case: Optional[Any] = get_encoder_layer_array(SCREAMING_SNAKE_CASE__ , """_intermediate_dense/kernel""")
__snake_case: List[Any] = get_encoder_layer_array(SCREAMING_SNAKE_CASE__ , """_intermediate_dense/bias""")
# Output
__snake_case: BertOutput = layer.output
__snake_case: Optional[Any] = get_encoder_layer_array(SCREAMING_SNAKE_CASE__ , """_output_dense/kernel""")
__snake_case: Optional[int] = get_encoder_layer_array(SCREAMING_SNAKE_CASE__ , """_output_dense/bias""")
__snake_case: Any = get_encoder_layer_array(SCREAMING_SNAKE_CASE__ , """_output_layer_norm/gamma""")
__snake_case: str = get_encoder_layer_array(SCREAMING_SNAKE_CASE__ , """_output_layer_norm/beta""")
# Embeddings
__snake_case: Dict = get_encoder_array("""_position_embedding_layer/embeddings""")
__snake_case: Dict = get_encoder_array("""_type_embedding_layer/embeddings""")
__snake_case: Dict = get_encoder_array("""_embedding_norm_layer/gamma""")
__snake_case: Optional[Any] = get_encoder_array("""_embedding_norm_layer/beta""")
# LM Head
__snake_case: Optional[Any] = model.cls.predictions.transform
__snake_case: Optional[Any] = get_masked_lm_array("""dense/kernel""")
__snake_case: Optional[int] = get_masked_lm_array("""dense/bias""")
__snake_case: str = get_masked_lm_array("""layer_norm/gamma""")
__snake_case: str = get_masked_lm_array("""layer_norm/beta""")
__snake_case: List[str] = get_masked_lm_array("""embedding_table""")
# Pooling
__snake_case: int = BertPooler(config=SCREAMING_SNAKE_CASE__)
__snake_case: BertPooler = get_encoder_array("""_pooler_layer/kernel""")
__snake_case: BertPooler = get_encoder_array("""_pooler_layer/bias""")
# Export final model
model.save_pretrained(SCREAMING_SNAKE_CASE__)
# Integration test - should load without any errors ;)
__snake_case: Dict = BertForMaskedLM.from_pretrained(SCREAMING_SNAKE_CASE__)
print(new_model.eval())
print("""Model conversion was done sucessfully!""")
if __name__ == "__main__":
__UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
__UpperCAmelCase : Tuple = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
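# Example invocation (hypothetical script name and paths; the script reads a
# TensorFlow Token Dropping BERT checkpoint and writes a PyTorch
# BertForMaskedLM directory):
#
#   python convert_token_dropping_bert.py \
#       --tf_checkpoint_path /path/to/tf_ckpt \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/output_dir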
| 371
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 293
| 0
|
from __future__ import annotations
import numpy as np
def A__ ( SCREAMING_SNAKE_CASE__) -> List[str]:
return np.maximum(0 , SCREAMING_SNAKE_CASE__)
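# Elementwise max(0, x): np.maximum broadcasts, so the same call accepts
# scalars, lists, and arrays (upstream this function is named relu; the dump
# renames it to A__). For example, relu(np.linspace(-2, 2, 5)) gives
# array([0., 0., 0., 1., 2.]).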
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 350
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : str , *A : Dict , A : Optional[int]=None , A : Tuple=None , **A : Optional[int] ):
super().__init__(*A , **A )
__snake_case: List[Any] = eval_examples
__snake_case: str = post_process_function
def UpperCAmelCase__ ( self : List[Any] , A : Dict=None , A : int=None , A : List[Any]=None , A : str = "eval" ):
__snake_case: int = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case: Any = self.get_eval_dataloader(A )
__snake_case: Optional[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case: Union[str, Any] = self.compute_metrics
__snake_case: List[str] = None
__snake_case: Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__snake_case: Tuple = time.time()
try:
__snake_case: Any = eval_loop(
A , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , )
finally:
__snake_case: Optional[int] = compute_metrics
__snake_case: Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node writes the results by default
__snake_case: List[str] = self.post_process_function(A , A , output.predictions )
__snake_case: List[Any] = self.compute_metrics(A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
__snake_case: str = metrics.pop(A )
metrics.update(output.metrics )
else:
__snake_case: List[Any] = output.metrics
if self.args.should_log:
# Only the main node logs the results by default
self.log(A )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__snake_case: str = self.callback_handler.on_evaluate(self.args , self.state , self.control , A )
return metrics
def UpperCAmelCase__ ( self : Optional[Any] , A : List[Any] , A : List[str] , A : str=None , A : str = "test" ):
__snake_case: Optional[Any] = self.get_test_dataloader(A )
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case: Optional[int] = self.compute_metrics
__snake_case: List[Any] = None
__snake_case: str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__snake_case: Dict = time.time()
try:
__snake_case: str = eval_loop(
A , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , )
finally:
__snake_case: List[Any] = compute_metrics
__snake_case: Dict = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case: Union[str, Any] = self.post_process_function(A , A , output.predictions , """predict""" )
__snake_case: str = self.compute_metrics(A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
__snake_case: List[str] = metrics.pop(A )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A )
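# Note on the two methods above: both temporarily set compute_metrics to None
# so the shared evaluation loop only gathers raw predictions; metrics are then
# computed once on the post-processed outputs (e.g. extracted answer spans)
# and prefixed with the metric_key_prefix.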
| 293
| 0
|
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
__UpperCAmelCase : Optional[Any] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
__UpperCAmelCase : List[Any] = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
__UpperCAmelCase : Tuple = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
__UpperCAmelCase : Dict = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] , reference_urls=[
"""https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""",
"""https://en.wikipedia.org/wiki/METEOR""",
] , )
def UpperCAmelCase__ ( self : List[Any] , A : str ):
import nltk
nltk.download("""wordnet""" )
if NLTK_VERSION >= version.Version("""3.6.5""" ):
nltk.download("""punkt""" )
if NLTK_VERSION >= version.Version("""3.6.6""" ):
nltk.download("""omw-1.4""" )
def UpperCAmelCase__ ( self : List[Any] , A : Tuple , A : List[Any] , A : Union[str, Any]=0.9 , A : List[str]=3 , A : List[str]=0.5 ):
if NLTK_VERSION >= version.Version("""3.6.5""" ):
__snake_case: Any = [
meteor_score.single_meteor_score(
word_tokenize(A ) , word_tokenize(A ) , alpha=A , beta=A , gamma=A )
for ref, pred in zip(A , A )
]
else:
__snake_case: Tuple = [
meteor_score.single_meteor_score(A , A , alpha=A , beta=A , gamma=A )
for ref, pred in zip(A , A )
]
return {"meteor": np.mean(A )}
| 351
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : int = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = """rwkv"""
lowerCAmelCase__ = {"""max_position_embeddings""": """context_length"""}
def __init__( self : Dict , A : List[Any]=50_277 , A : List[Any]=1_024 , A : Union[str, Any]=4_096 , A : Tuple=32 , A : List[Any]=None , A : Tuple=None , A : Tuple=1E-5 , A : int=0 , A : Optional[int]=0 , A : Dict=6 , A : Dict=False , A : int=True , **A : List[Any] , ):
__snake_case: Tuple = vocab_size
__snake_case: Any = context_length
__snake_case: Dict = hidden_size
__snake_case: Dict = num_hidden_layers
__snake_case: Union[str, Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
__snake_case: str = intermediate_size if intermediate_size is not None else 4 * hidden_size
__snake_case: Any = layer_norm_epsilon
__snake_case: int = rescale_every
__snake_case: str = use_cache
__snake_case: Dict = bos_token_id
__snake_case: Union[str, Any] = eos_token_id
super().__init__(
tie_word_embeddings=A , bos_token_id=A , eos_token_id=A , **A )
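# Design note: when attention_hidden_size or intermediate_size is left as
# None, __init__ above falls back to hidden_size and 4 * hidden_size
# respectively, matching the common transformer sizing convention.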
| 293
| 0
|
"""simple docstring"""
from __future__ import annotations
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Union[str, Any]:
print(F'''Vertex\tShortest Distance from vertex {src}''')
for i, d in enumerate(SCREAMING_SNAKE_CASE__):
print(F'''{i}\t\t{d}''')
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Optional[int]:
for j in range(SCREAMING_SNAKE_CASE__):
__snake_case: List[str] = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""") and distance[u] + w < distance[v]:
return True
return False
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> list[float]:
__snake_case: Any = [float("""inf""")] * vertex_count
__snake_case: Any = 0.0
for _ in range(vertex_count - 1):
for j in range(SCREAMING_SNAKE_CASE__):
__snake_case: int = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""") and distance[u] + w < distance[v]:
__snake_case: Union[str, Any] = distance[u] + w
__snake_case: Optional[Any] = check_negative_cycle(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if negative_cycle_exists:
raise Exception("""Negative cycle found""")
return distance
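# Worked example of the intended API (hedged: this dump renames every
# definition to A__; upstream the functions are check_negative_cycle and
# bellman_ford). Edges are dicts with "src"/"dst"/"weight" keys:
example_graph = [
{"src": 0, "dst": 1, "weight": 4},
{"src": 0, "dst": 2, "weight": 1},
{"src": 2, "dst": 1, "weight": 2},
]
# bellman_ford(example_graph, vertex_count=3, edge_count=3, src=0) relaxes all
# edges V - 1 = 2 times and returns [0.0, 3.0, 1.0]: vertex 1 is cheaper via
# 0 -> 2 -> 1 (1 + 2 = 3) than via the direct edge of weight 4.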
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase : Tuple = int(input("Enter number of vertices: ").strip())
__UpperCAmelCase : Dict = int(input("Enter number of edges: ").strip())
__UpperCAmelCase : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("Edge ", i + 1)
__UpperCAmelCase : Union[str, Any] = (
int(x)
for x in input("Enter source, destination, weight: ").strip().split(" ")
)
__UpperCAmelCase : Tuple = {"src": src, "dst": dest, "weight": weight}
__UpperCAmelCase : Union[str, Any] = int(input("\nEnter shortest path source:").strip())
__UpperCAmelCase : Optional[Any] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 352
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase : str = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__UpperCAmelCase : Any = 250_004
__UpperCAmelCase : List[str] = 250_020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = MBartaaTokenizer
lowerCAmelCase__ = MBartaaTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def UpperCAmelCase__ ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case: Optional[int] = MBartaaTokenizer(A , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Any = """<s>"""
__snake_case: Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(A ) , 1_054 )
def UpperCAmelCase__ ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_054 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Dict = MBartaaTokenizer(A , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=A )
__snake_case: int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__snake_case: Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
__snake_case: List[Any] = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__snake_case: int = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
# fmt: off
__snake_case: List[str] = {"""input_ids""": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def UpperCAmelCase__ ( self : Union[str, Any] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__snake_case: Any = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case: Optional[int] = self.rust_tokenizer_class.from_pretrained(A , **A )
__snake_case: Union[str, Any] = self.tokenizer_class.from_pretrained(A , **A )
__snake_case: List[str] = tempfile.mkdtemp()
__snake_case: Tuple = tokenizer_r.save_pretrained(A )
__snake_case: Optional[int] = tokenizer_p.save_pretrained(A )
# Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__snake_case: Dict = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
__snake_case: Tuple = tokenizer_r.from_pretrained(A )
__snake_case: Optional[Any] = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
__snake_case: Tuple = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(A , legacy_format=A )
__snake_case: List[str] = tokenizer_p.save_pretrained(A )
# Checks it saves with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
__snake_case: List[Any] = tokenizer_r.from_pretrained(A )
__snake_case: Dict = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
__snake_case: List[str] = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(A , legacy_format=A )
__snake_case: Dict = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__snake_case: Any = tokenizer_r.from_pretrained(A )
__snake_case: Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = """facebook/mbart-large-50-one-to-many-mmt"""
lowerCAmelCase__ = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
lowerCAmelCase__ = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
lowerCAmelCase__ = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def UpperCAmelCase__ ( cls : int ):
__snake_case: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
__snake_case: str = 1
return cls
def UpperCAmelCase__ ( self : Any ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250_020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 250_038 )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
self.assertIn(A , self.tokenizer.all_special_ids )
__snake_case: Dict = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
__snake_case: str = self.tokenizer.decode(A , skip_special_tokens=A )
__snake_case: Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A )
self.assertEqual(A , A )
self.assertNotIn(self.tokenizer.eos_token , A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: List[str] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , A )
__snake_case: Union[str, Any] = 10
__snake_case: List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0]
self.assertEqual(ids[0] , A )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(A ) , A )
def UpperCAmelCase__ ( self : Tuple ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250_053, 250_001] )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[Any] = tempfile.mkdtemp()
__snake_case: Any = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A )
__snake_case: Union[str, Any] = MBartaaTokenizer.from_pretrained(A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A )
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" )
__snake_case: List[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: int = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
__snake_case: Optional[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(A , A )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__snake_case: List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCAmelCase__ ( self : str ):
__snake_case: List[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" )
__snake_case: Union[str, Any] = self.tokenizer(
text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" )
__snake_case: Dict = targets["""input_ids"""]
__snake_case: Any = shift_tokens_right(A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(A ) , {
# en_XX, A, test, EOS
"""input_ids""": [[250_004, 62, 3_034, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250_001,
} , )
| 293
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase : Tuple = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : str = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
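# Note: under TYPE_CHECKING the real symbols are imported so static analyzers
# can resolve them, while at runtime the module is replaced by a _LazyModule
# that defers the heavy torch imports until an attribute is first accessed.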
| 353
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__UpperCAmelCase : str = logging.get_logger(__name__)
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : Any , A : int , A : int , A : float , **A : Optional[int] ):
__snake_case: List[str] = feature_size
__snake_case: Optional[int] = sampling_rate
__snake_case: Any = padding_value
__snake_case: Dict = kwargs.pop("""padding_side""" , """right""" )
__snake_case: Union[str, Any] = kwargs.pop("""return_attention_mask""" , A )
super().__init__(**A )
def UpperCAmelCase__ ( self : Optional[Any] , A : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , A : Union[bool, str, PaddingStrategy] = True , A : Optional[int] = None , A : bool = False , A : Optional[int] = None , A : Optional[bool] = None , A : Optional[Union[str, TensorType]] = None , ):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(A , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__snake_case: Optional[int] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f''' to this method that includes {self.model_input_names[0]}, but you provided'''
f''' {list(processed_features.keys() )}''' )
__snake_case: List[str] = processed_features[self.model_input_names[0]]
__snake_case: Any = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(A ) == 0:
if return_attention_mask:
__snake_case: Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__snake_case: int = required_input[0]
if isinstance(A , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non-empty element.
__snake_case: Optional[int] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(A ):
__snake_case: Optional[int] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(A ):
__snake_case: str = """tf"""
elif is_torch_tensor(A ):
__snake_case: str = """pt"""
elif isinstance(A , (int, float, list, tuple, np.ndarray) ):
__snake_case: List[str] = """np"""
else:
raise ValueError(
f'''type of {first_element} unknown: {type(A )}. '''
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__snake_case: List[Any] = to_numpy(A )
else:
__snake_case: Union[str, Any] = [to_numpy(A ) for v in value]
# Convert padding_strategy in PaddingStrategy
__snake_case: Union[str, Any] = self._get_padding_strategies(padding=A , max_length=A )
__snake_case: Any = processed_features[self.model_input_names[0]]
__snake_case: int = len(A )
if not all(len(A ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
__snake_case: Union[str, Any] = []
for i in range(A ):
__snake_case: List[Any] = {k: v[i] for k, v in processed_features.items()}
# truncation
__snake_case: Tuple = self._truncate(
A , max_length=A , pad_to_multiple_of=A , truncation=A , )
truncated_inputs.append(A )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__snake_case: Optional[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__snake_case: List[str] = PaddingStrategy.MAX_LENGTH
__snake_case: List[Any] = {}
for i in range(A ):
# padding
__snake_case: Any = self._pad(
truncated_inputs[i] , max_length=A , padding_strategy=A , pad_to_multiple_of=A , return_attention_mask=A , )
for key, value in outputs.items():
if key not in batch_outputs:
__snake_case: Optional[Any] = []
if value.dtype is np.dtype(np.floataa ):
__snake_case: str = value.astype(np.floataa )
batch_outputs[key].append(A )
return BatchFeature(A , tensor_type=A )
def UpperCAmelCase__ ( self : int , A : Union[Dict[str, np.ndarray], BatchFeature] , A : Optional[int] = None , A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , A : Optional[int] = None , A : Optional[bool] = None , ):
__snake_case: List[Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__snake_case: List[str] = len(A )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case: List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case: Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__snake_case: List[str] = np.ones(len(A ) , dtype=np.intaa )
if needs_to_be_padded:
__snake_case: Any = max_length - len(A )
if self.padding_side == "right":
if return_attention_mask:
__snake_case: Optional[int] = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
__snake_case: Any = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__snake_case: Union[str, Any] = np.pad(
A , A , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__snake_case: Dict = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
__snake_case: Union[str, Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__snake_case: str = np.pad(
A , A , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def UpperCAmelCase__ ( self : Optional[Any] , A : Union[Dict[str, np.ndarray], BatchFeature] , A : Optional[int] = None , A : Optional[int] = None , A : Optional[bool] = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
__snake_case: List[str] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case: List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case: Tuple = len(A ) > max_length
if needs_to_be_truncated:
__snake_case: List[Any] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__snake_case: int = processed_features["""attention_mask"""][:max_length]
return processed_features
def UpperCAmelCase__ ( self : int , A : int=False , A : int=None ):
# Get padding strategy
if padding is not False:
if padding is True:
__snake_case: Optional[int] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(A , A ):
__snake_case: Optional[int] = PaddingStrategy(A )
elif isinstance(A , A ):
__snake_case: Any = padding
else:
__snake_case: Any = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
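# A minimal runnable sketch of the padding path above, using a concrete
# subclass from the transformers library (Wav2Vec2FeatureExtractor inherits
# this logic; note this dump renames the methods, upstream the entry point is
# SequenceFeatureExtractor.pad):
import numpy as np
from transformers import Wav2Vec2FeatureExtractor
fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
batch = fe.pad({"input_values": [np.zeros(8), np.zeros(5)]}, padding="longest", return_tensors="np")
print(batch["input_values"].shape)  # (2, 8): the 5-sample row is right-padded with 0.0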
| 293
| 0
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__UpperCAmelCase : Optional[int] = "\\n\n"
__UpperCAmelCase : Tuple = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
__UpperCAmelCase : Tuple = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
def UpperCAmelCase__ ( self : int , A : str , A : Optional[Any] , A : int = 16 , A : bool = True , A : Optional[int]=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
__snake_case: Optional[Any] = """cuda"""
else:
__snake_case: str = """cuda""" if torch.cuda.is_available() else """cpu"""
__snake_case: Dict = AutoModelForCausalLM.from_pretrained(A )
__snake_case: List[str] = model.to(A )
__snake_case: Optional[Any] = AutoTokenizer.from_pretrained(A )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__snake_case: Dict = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(A ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__snake_case: Tuple = model.config.max_length - 1
else:
__snake_case: Optional[Any] = model.config.max_length
__snake_case: Optional[int] = tokenizer(
A , add_special_tokens=A , padding=A , truncation=A , max_length=A , return_tensors="""pt""" , return_attention_mask=A , ).to(A )
__snake_case: Tuple = encodings["""input_ids"""]
__snake_case: Any = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__snake_case: Optional[int] = []
__snake_case: Optional[int] = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0 , len(A ) , A ) ):
__snake_case: Dict = min(start_index + batch_size , len(A ) )
__snake_case: Optional[int] = encoded_texts[start_index:end_index]
__snake_case: List[Any] = attn_masks[start_index:end_index]
if add_start_token:
__snake_case: Tuple = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(A )
__snake_case: Optional[Any] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
__snake_case: Union[str, Any] = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(A ), attn_mask] , dim=1 )
__snake_case: List[str] = encoded_batch
with torch.no_grad():
__snake_case: Union[str, Any] = model(A , attention_mask=A ).logits
__snake_case: List[str] = out_logits[..., :-1, :].contiguous()
__snake_case: Optional[Any] = labels[..., 1:].contiguous()
__snake_case: Dict = attn_mask[..., 1:].contiguous()
            __snake_case: Optional[Any] = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , A ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(A )}
| 354
|
from __future__ import annotations
import numpy as np
def A__ ( SCREAMING_SNAKE_CASE__) -> np.ndarray:
return np.maximum(0 , SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
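# Illustrative extension (not in the original snippet): ReLU is elementwise, so the
# same function works on arrays of any shape, and its subgradient is a 0/1 mask.
# `relu_grad` is a hypothetical helper added here only for demonstration.
def relu_grad(vector):
    return np.where(np.asarray(vector) > 0, 1.0, 0.0)

# print(np.array(relu([[-2.5, 3.0], [0.0, 7.1]])))  # --> [[0.  3. ], [0.  7.1]]
# print(relu_grad([-2.5, 3.0, 0.0]))                # --> [0. 1. 0.]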
| 293
| 0
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : List[str] = {"vocab_file": "spiece.model"}
__UpperCAmelCase : Optional[int] = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
__UpperCAmelCase : Union[str, Any] = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
__UpperCAmelCase : Tuple = "▁"
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , A : Tuple , A : List[str]=True , A : List[str]=True , A : List[str]=False , A : Tuple="[CLS]" , A : Optional[Any]="[SEP]" , A : Dict="<unk>" , A : Optional[Any]="[SEP]" , A : Dict="<pad>" , A : str="[CLS]" , A : List[str]="[MASK]" , A : Optional[Dict[str, Any]] = None , **A : Tuple , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__snake_case: List[Any] = (
AddedToken(A , lstrip=A , rstrip=A , normalized=A )
if isinstance(A , A )
else mask_token
)
__snake_case: List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
__snake_case: Optional[Any] = do_lower_case
__snake_case: Tuple = remove_space
__snake_case: Tuple = keep_accents
__snake_case: int = vocab_file
__snake_case: Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def UpperCAmelCase__ ( self : List[Any] ):
return len(self.sp_model )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Optional[int] = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ):
__snake_case: List[str] = self.__dict__.copy()
__snake_case: Any = None
return state
def __setstate__( self : Optional[Any] , A : str ):
__snake_case: str = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__snake_case: int = {}
__snake_case: str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__ ( self : str , A : Optional[Any] ):
if self.remove_space:
__snake_case: List[str] = """ """.join(inputs.strip().split() )
else:
__snake_case: Union[str, Any] = inputs
__snake_case: Union[str, Any] = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
__snake_case: str = unicodedata.normalize("""NFKD""" , A )
__snake_case: List[str] = """""".join([c for c in outputs if not unicodedata.combining(A )] )
if self.do_lower_case:
__snake_case: Dict = outputs.lower()
return outputs
def UpperCAmelCase__ ( self : Tuple , A : str ):
__snake_case: List[str] = self.preprocess_text(A )
__snake_case: Any = self.sp_model.encode(A , out_type=A )
__snake_case: List[Any] = []
for piece in pieces:
if len(A ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
__snake_case: str = self.sp_model.EncodeAsPieces(piece[:-1].replace(A , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__snake_case: Tuple = cur_pieces[1:]
else:
__snake_case: Dict = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(A )
else:
new_pieces.append(A )
return new_pieces
def UpperCAmelCase__ ( self : int , A : Optional[Any] ):
return self.sp_model.PieceToId(A )
def UpperCAmelCase__ ( self : Dict , A : str ):
return self.sp_model.IdToPiece(A )
def UpperCAmelCase__ ( self : str , A : List[str] ):
__snake_case: Optional[Any] = []
__snake_case: int = """"""
__snake_case: int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
__snake_case: List[Any] = True
__snake_case: Dict = []
else:
current_sub_tokens.append(A )
__snake_case: List[Any] = False
out_string += self.sp_model.decode(A )
return out_string.strip()
def UpperCAmelCase__ ( self : List[str] , A : List[int] , A : Optional[List[int]] = None ):
__snake_case: Any = [self.sep_token_id]
__snake_case: Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ ( self : Union[str, Any] , A : List[int] , A : Optional[List[int]] = None , A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is not None:
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1]
def UpperCAmelCase__ ( self : Any , A : List[int] , A : Optional[List[int]] = None ):
__snake_case: Dict = [self.sep_token_id]
__snake_case: Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self : Optional[int] , A : str , A : Optional[str] = None ):
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__snake_case: Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
__snake_case: str = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
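# Hedged, standalone sketch (added for illustration) of the normalization the
# preprocessing method above performs: whitespace collapsing, LaTeX-style quote
# replacement, optional NFKD accent stripping, and optional lower-casing. It
# mirrors those steps without requiring a SentencePiece model on disk.
def _preprocess_text_sketch(text, remove_space=True, keep_accents=False, do_lower_case=True):
    out = " ".join(text.strip().split()) if remove_space else text
    out = out.replace("``", '"').replace("''", '"')
    if not keep_accents:
        out = unicodedata.normalize("NFKD", out)
        out = "".join(c for c in out if not unicodedata.combining(c))
    return out.lower() if do_lower_case else out

# _preprocess_text_sketch("  Héllo ``world''  ")  # --> 'hello "world"'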
| 355
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@parameterized.expand([(None,), ("""foo.json""",)] )
def UpperCAmelCase__ ( self : List[str] , A : Optional[Any] ):
__snake_case: Any = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
__snake_case: Optional[int] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: str = AutoConfig.from_pretrained("""gpt2""" )
__snake_case: Any = GenerationConfig.from_model_config(A )
__snake_case: str = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self : str ):
__snake_case: List[str] = GenerationConfig()
__snake_case: Tuple = {
"""max_new_tokens""": 1_024,
"""foo""": """bar""",
}
__snake_case: List[str] = copy.deepcopy(A )
__snake_case: Optional[int] = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {"""foo""": """bar"""} )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: List[str] = GenerationConfig()
__snake_case: Optional[int] = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(A )
__snake_case: Any = GenerationConfig.from_pretrained(A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
__snake_case: int = GenerationConfig.from_model_config(A )
assert not hasattr(A , """foo""" ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Dict = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
__snake_case: Union[str, Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
__snake_case: Tuple = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCAmelCase__ ( cls : List[str] ):
__snake_case: Optional[int] = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Optional[int] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
__snake_case: str = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id="""test-generation-config""" , push_to_hub=A , use_auth_token=self._token )
__snake_case: Optional[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
__snake_case: int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=A , use_auth_token=self._token )
__snake_case: Optional[int] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
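# Hedged, standalone sketch of the save/load round trip the tests above rely on;
# it uses only `GenerationConfig` and `tempfile`, both already imported here.
def _generation_config_roundtrip_sketch():
    config = GenerationConfig(do_sample=True, temperature=0.7)
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)          # writes generation_config.json
        loaded = GenerationConfig.from_pretrained(tmp_dir)
    assert loaded.temperature == 0.7             # explicit values survive the round trip
    return loaded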
| 293
| 0
|
import csv
import tweepy
# Twitter API credentials
__UpperCAmelCase : str = ""
__UpperCAmelCase : Optional[Any] = ""
__UpperCAmelCase : Optional[Any] = ""
__UpperCAmelCase : Any = ""
def A__ ( SCREAMING_SNAKE_CASE__) -> None:
# authorize twitter, initialize tweepy
__snake_case: Any = tweepy.OAuthHandler(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
auth.set_access_token(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
__snake_case: Dict = tweepy.API(SCREAMING_SNAKE_CASE__)
# initialize a list to hold all the tweepy Tweets
__snake_case: Dict = []
# make initial request for most recent tweets (200 is the maximum allowed count)
__snake_case: int = api.user_timeline(screen_name=SCREAMING_SNAKE_CASE__ , count=200)
# save most recent tweets
alltweets.extend(SCREAMING_SNAKE_CASE__)
# save the id of the oldest tweet less one
__snake_case: Optional[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(SCREAMING_SNAKE_CASE__) > 0:
print(F'''getting tweets before {oldest}''')
# all subsequent requests use the max_id param to prevent duplicates
__snake_case: str = api.user_timeline(
screen_name=SCREAMING_SNAKE_CASE__ , count=200 , max_id=SCREAMING_SNAKE_CASE__)
# save most recent tweets
alltweets.extend(SCREAMING_SNAKE_CASE__)
# update the id of the oldest tweet less one
__snake_case: str = alltweets[-1].id - 1
print(F'''...{len(SCREAMING_SNAKE_CASE__)} tweets downloaded so far''')
# transform the tweepy tweets into a 2D array that will populate the csv
__snake_case: Optional[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F'''new_{screen_name}_tweets.csv''' , """w""") as f:
__snake_case: str = csv.writer(SCREAMING_SNAKE_CASE__)
writer.writerow(["""id""", """created_at""", """text"""])
writer.writerows(SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
| 356
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__UpperCAmelCase : Tuple = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def A__ ( SCREAMING_SNAKE_CASE__) -> Union[str, Any]:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts)
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config)
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights)
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> str:
if args.student_type == "roberta":
__snake_case: Optional[Any] = False
elif args.student_type == "gpt2":
__snake_case: str = False
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[str]:
if args.student_type == "roberta":
__snake_case: Optional[int] = False
def A__ ( ) -> Tuple:
__snake_case: Optional[int] = argparse.ArgumentParser(description="""Training""")
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""")
parser.add_argument(
"""--dump_path""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The output directory (log, checkpoints, parameters, etc.)""")
parser.add_argument(
"""--data_file""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=SCREAMING_SNAKE_CASE__ , choices=["""distilbert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""Path to the student configuration.""")
parser.add_argument(
"""--student_pretrained_weights""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Load student initialization checkpoint.""")
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""Teacher type (BERT, RoBERTa).""")
parser.add_argument("""--teacher_name""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The teacher model.""")
parser.add_argument("""--temperature""" , default=2.0 , type=SCREAMING_SNAKE_CASE__ , help="""Temperature for the softmax temperature.""")
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the distillation loss. Must be >=0.""")
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the CLM loss. Must be >=0.""")
parser.add_argument("""--alpha_mse""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the MSE loss. Must be >=0.""")
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the cosine embedding loss. Must be >=0.""")
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""")
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to mask out.""")
parser.add_argument("""--word_keep""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to keep.""")
parser.add_argument("""--word_rand""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to randomly replace.""")
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=SCREAMING_SNAKE_CASE__ , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=SCREAMING_SNAKE_CASE__ , help="""The token counts in the data_file for MLM.""")
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=SCREAMING_SNAKE_CASE__ , default=3 , help="""Number of pass on the whole dataset.""")
parser.add_argument("""--batch_size""" , type=SCREAMING_SNAKE_CASE__ , default=5 , help="""Batch size (for each process).""")
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=SCREAMING_SNAKE_CASE__ , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=SCREAMING_SNAKE_CASE__ , help="""Linear warmup proportion.""")
parser.add_argument("""--weight_decay""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Weight decay if we apply some.""")
parser.add_argument("""--learning_rate""" , default=5e-4 , type=SCREAMING_SNAKE_CASE__ , help="""The initial learning rate for Adam.""")
parser.add_argument("""--adam_epsilon""" , default=1e-6 , type=SCREAMING_SNAKE_CASE__ , help="""Epsilon for Adam optimizer.""")
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=SCREAMING_SNAKE_CASE__ , help="""Max gradient norm.""")
parser.add_argument("""--initializer_range""" , default=0.02 , type=SCREAMING_SNAKE_CASE__ , help="""Random initialization range.""")
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=SCREAMING_SNAKE_CASE__ , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=SCREAMING_SNAKE_CASE__ , default=1 , help="""Number of GPUs in the node.""")
parser.add_argument("""--local_rank""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , help="""Distributed training - Local rank""")
parser.add_argument("""--seed""" , type=SCREAMING_SNAKE_CASE__ , default=56 , help="""Random seed""")
parser.add_argument("""--log_interval""" , type=SCREAMING_SNAKE_CASE__ , default=500 , help="""Tensorboard logging interval.""")
parser.add_argument("""--checkpoint_interval""" , type=SCREAMING_SNAKE_CASE__ , default=4000 , help="""Checkpoint interval.""")
__snake_case: List[Any] = parser.parse_args()
sanity_checks(SCREAMING_SNAKE_CASE__)
# ARGS #
init_gpu_params(SCREAMING_SNAKE_CASE__)
set_seed(SCREAMING_SNAKE_CASE__)
if args.is_master:
if os.path.exists(args.dump_path):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    """ it. Use `--force` if you want to overwrite it.""")
else:
shutil.rmtree(args.dump_path)
if not os.path.exists(args.dump_path):
os.makedirs(args.dump_path)
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''')
# SAVE PARAMS #
logger.info(F'''Param: {args}''')
with open(os.path.join(args.dump_path , """parameters.json""") , """w""") as f:
json.dump(vars(SCREAMING_SNAKE_CASE__) , SCREAMING_SNAKE_CASE__ , indent=4)
git_log(args.dump_path)
__snake_case , __snake_case , __snake_case: str = MODEL_CLASSES[args.student_type]
__snake_case , __snake_case , __snake_case: Union[str, Any] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__snake_case: Tuple = teacher_tokenizer_class.from_pretrained(args.teacher_name)
__snake_case: str = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__snake_case: List[str] = tokenizer.all_special_tokens.index(SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''')
__snake_case: Optional[Any] = special_tok_ids
__snake_case: List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file , """rb""") as fp:
__snake_case: int = pickle.load(SCREAMING_SNAKE_CASE__)
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''')
with open(args.token_counts , """rb""") as fp:
__snake_case: List[str] = pickle.load(SCREAMING_SNAKE_CASE__)
__snake_case: Dict = np.maximum(SCREAMING_SNAKE_CASE__ , 1) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__snake_case: Union[str, Any] = 0.0 # do not predict special tokens
__snake_case: Any = torch.from_numpy(SCREAMING_SNAKE_CASE__)
else:
__snake_case: Any = None
__snake_case: Union[str, Any] = LmSeqsDataset(params=SCREAMING_SNAKE_CASE__ , data=SCREAMING_SNAKE_CASE__)
logger.info("""Data loader created.""")
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''')
__snake_case: Tuple = student_config_class.from_pretrained(args.student_config)
__snake_case: List[str] = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''')
__snake_case: Optional[int] = student_model_class.from_pretrained(args.student_pretrained_weights , config=SCREAMING_SNAKE_CASE__)
else:
__snake_case: Union[str, Any] = student_model_class(SCREAMING_SNAKE_CASE__)
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''')
logger.info("""Student loaded.""")
# TEACHER #
__snake_case: Optional[int] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=SCREAMING_SNAKE_CASE__)
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''')
logger.info(F'''Teacher loaded from {args.teacher_name}.''')
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if args.freeze_token_type_embds:
freeze_token_type_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__snake_case: List[str] = Distiller(
params=SCREAMING_SNAKE_CASE__ , dataset=SCREAMING_SNAKE_CASE__ , token_probs=SCREAMING_SNAKE_CASE__ , student=SCREAMING_SNAKE_CASE__ , teacher=SCREAMING_SNAKE_CASE__)
distiller.train()
logger.info("""Let's go get some drinks.""")
if __name__ == "__main__":
main()
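# Illustrative invocation (hedged: the file names below are placeholders, not
# artifacts shipped with this script). Note that `--alpha_clm 0.0` must accompany
# `--mlm`, since the sanity checks above accept either an MLM or a CLM objective
# but not both:
#
#   python train.py \
#     --student_type distilbert --student_config student_config.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_mlm 2.0 --alpha_clm 0.0 \
#     --token_counts token_counts.pickle \
#     --data_file binarized_text.pickle \
#     --dump_path serialization_dir/my_distillation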
| 293
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = StableDiffusionXLImgaImgPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - {"""latents"""}
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
__snake_case: List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=A , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__snake_case: List[Any] = EulerDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
__snake_case: Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__snake_case: str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
__snake_case: Optional[int] = CLIPTextModel(A )
__snake_case: Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=A )
__snake_case: List[Any] = CLIPTextModelWithProjection(A )
__snake_case: List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=A )
__snake_case: Union[str, Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCAmelCase__ ( self : str , A : Union[str, Any] , A : Any=0 ):
__snake_case: Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
__snake_case: Optional[Any] = image / 2 + 0.5
if str(A ).startswith("""mps""" ):
__snake_case: Optional[int] = torch.manual_seed(A )
else:
__snake_case: Tuple = torch.Generator(device=A ).manual_seed(A )
__snake_case: Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
__snake_case: Tuple = self.get_dummy_components()
__snake_case: List[str] = StableDiffusionXLImgaImgPipeline(**A )
__snake_case: Optional[int] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
__snake_case: List[Any] = self.get_dummy_inputs(A )
__snake_case: Tuple = sd_pipe(**A ).images
__snake_case: int = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__snake_case: List[str] = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : int ):
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCAmelCase__ ( self : Any ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCAmelCase__ ( self : List[Any] ):
pass
def UpperCAmelCase__ ( self : Any ):
__snake_case: Any = self.get_dummy_components()
__snake_case: Tuple = StableDiffusionXLImgaImgPipeline(**A )
__snake_case: int = sd_pipe.to(A )
__snake_case: List[str] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
# forward without prompt embeds
__snake_case: Optional[Any] = self.get_dummy_inputs(A )
__snake_case: Optional[Any] = 3 * ["""this is a negative prompt"""]
__snake_case: List[str] = negative_prompt
__snake_case: Tuple = 3 * [inputs["""prompt"""]]
__snake_case: Optional[Any] = sd_pipe(**A )
__snake_case: List[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__snake_case: Tuple = self.get_dummy_inputs(A )
__snake_case: List[Any] = 3 * ["""this is a negative prompt"""]
__snake_case: Any = 3 * [inputs.pop("""prompt""" )]
        __snake_case , __snake_case , __snake_case , __snake_case: int = sd_pipe.encode_prompt(A , negative_prompt=A )
__snake_case: Optional[Any] = sd_pipe(
**A , prompt_embeds=A , negative_prompt_embeds=A , pooled_prompt_embeds=A , negative_pooled_prompt_embeds=A , )
__snake_case: str = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def UpperCAmelCase__ ( self : List[str] , A : List[str] , A : Union[str, Any]="cpu" , A : List[Any]=torch.float32 , A : Any=0 ):
__snake_case: int = torch.Generator(device=A ).manual_seed(A )
__snake_case: Union[str, Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) )
__snake_case: int = torch.from_numpy(A ).to(device=A , dtype=A )
__snake_case: Any = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Optional[int] = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
__snake_case: Dict = self.get_inputs(A )
__snake_case: Union[str, Any] = pipe(**A ).images
__snake_case: Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__snake_case: Dict = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
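# Hedged usage sketch (requires a GPU, network access, and a public SDXL img2img
# checkpoint; the checkpoint name and `init_image` below are placeholders, and
# this is not exercised by the tests above):
#
#   pipe = StableDiffusionXLImgaImgPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-xl-refiner-1.0"
#   ).to("cuda")
#   out = pipe(prompt="a photo of an astronaut", image=init_image, strength=0.75)
#   out.images[0].save("out.png")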
| 357
|
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 293
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__UpperCAmelCase : str = logging.get_logger(__name__)
class __snake_case ( __lowerCamelCase ):
def __init__( self : Any , A : int , A : int , A : float , **A : Optional[int] ):
__snake_case: List[str] = feature_size
__snake_case: Optional[int] = sampling_rate
__snake_case: Any = padding_value
__snake_case: Dict = kwargs.pop("""padding_side""" , """right""" )
__snake_case: Union[str, Any] = kwargs.pop("""return_attention_mask""" , A )
super().__init__(**A )
def UpperCAmelCase__ ( self : Optional[Any] , A : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , A : Union[bool, str, PaddingStrategy] = True , A : Optional[int] = None , A : bool = False , A : Optional[int] = None , A : Optional[bool] = None , A : Optional[Union[str, TensorType]] = None , ):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(A , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__snake_case: Optional[int] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f''' to this method that includes {self.model_input_names[0]}, but you provided'''
f''' {list(processed_features.keys() )}''' )
__snake_case: List[str] = processed_features[self.model_input_names[0]]
__snake_case: Any = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(A ) == 0:
if return_attention_mask:
__snake_case: Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__snake_case: int = required_input[0]
if isinstance(A , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__snake_case: Optional[int] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(A ):
__snake_case: Optional[int] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(A ):
__snake_case: str = """tf"""
elif is_torch_tensor(A ):
__snake_case: str = """pt"""
elif isinstance(A , (int, float, list, tuple, np.ndarray) ):
__snake_case: List[str] = """np"""
else:
raise ValueError(
f'''type of {first_element} unknown: {type(A )}. '''
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__snake_case: List[Any] = to_numpy(A )
else:
__snake_case: Union[str, Any] = [to_numpy(A ) for v in value]
# Convert padding_strategy in PaddingStrategy
__snake_case: Union[str, Any] = self._get_padding_strategies(padding=A , max_length=A )
__snake_case: Any = processed_features[self.model_input_names[0]]
__snake_case: int = len(A )
if not all(len(A ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
__snake_case: Union[str, Any] = []
for i in range(A ):
__snake_case: List[Any] = {k: v[i] for k, v in processed_features.items()}
# truncation
__snake_case: Tuple = self._truncate(
A , max_length=A , pad_to_multiple_of=A , truncation=A , )
truncated_inputs.append(A )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__snake_case: Optional[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__snake_case: List[str] = PaddingStrategy.MAX_LENGTH
__snake_case: List[Any] = {}
for i in range(A ):
# padding
__snake_case: Any = self._pad(
truncated_inputs[i] , max_length=A , padding_strategy=A , pad_to_multiple_of=A , return_attention_mask=A , )
for key, value in outputs.items():
if key not in batch_outputs:
__snake_case: Optional[Any] = []
                if value.dtype is np.dtype(np.float64 ):
                    __snake_case: str = value.astype(np.float32 )
batch_outputs[key].append(A )
return BatchFeature(A , tensor_type=A )
def UpperCAmelCase__ ( self : int , A : Union[Dict[str, np.ndarray], BatchFeature] , A : Optional[int] = None , A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , A : Optional[int] = None , A : Optional[bool] = None , ):
__snake_case: List[Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__snake_case: List[str] = len(A )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case: List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case: Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
            __snake_case: List[str] = np.ones(len(A ) , dtype=np.int32 )
if needs_to_be_padded:
__snake_case: Any = max_length - len(A )
if self.padding_side == "right":
if return_attention_mask:
__snake_case: Optional[int] = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
__snake_case: Any = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__snake_case: Union[str, Any] = np.pad(
A , A , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__snake_case: Dict = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
__snake_case: Union[str, Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__snake_case: str = np.pad(
A , A , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def UpperCAmelCase__ ( self : Optional[Any] , A : Union[Dict[str, np.ndarray], BatchFeature] , A : Optional[int] = None , A : Optional[int] = None , A : Optional[bool] = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
__snake_case: List[str] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case: List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case: Tuple = len(A ) > max_length
if needs_to_be_truncated:
__snake_case: List[Any] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__snake_case: int = processed_features["""attention_mask"""][:max_length]
return processed_features
def UpperCAmelCase__ ( self : int , A : int=False , A : int=None ):
# Get padding strategy
if padding is not False:
if padding is True:
__snake_case: Optional[int] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(A , A ):
__snake_case: Optional[int] = PaddingStrategy(A )
elif isinstance(A , A ):
__snake_case: Any = padding
else:
__snake_case: Any = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
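# Hedged, standalone sketch of the right-side padding rule implemented above for
# 1-D features (feature_size == 1): pad every sequence to the batch maximum with
# `padding_value` and build the matching attention mask. Toy inputs only.
def _pad_sketch(batch, padding_value=0.0):
    max_len = max(len(seq) for seq in batch)
    padded = np.array([seq + [padding_value] * (max_len - len(seq)) for seq in batch])
    attention_mask = np.array(
        [[1] * len(seq) + [0] * (max_len - len(seq)) for seq in batch], dtype=np.int32
    )
    return padded, attention_mask

# _pad_sketch([[0.1, 0.2, 0.3], [0.4]])  # mask --> [[1, 1, 1], [1, 0, 0]]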
| 358
|
import argparse
from collections import defaultdict
import yaml
__UpperCAmelCase : int = "docs/source/en/_toctree.yml"
def A__ ( SCREAMING_SNAKE_CASE__) -> Dict:
__snake_case: Union[str, Any] = defaultdict(SCREAMING_SNAKE_CASE__)
for doc in model_doc:
counts[doc["local"]] += 1
__snake_case: Dict = [key for key, value in counts.items() if value > 1]
__snake_case: Optional[Any] = []
for duplicate_key in duplicates:
__snake_case: Tuple = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key})
if len(SCREAMING_SNAKE_CASE__) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""")
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]})
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1])
# Sort
    return sorted(SCREAMING_SNAKE_CASE__ , key=lambda s: s["title"].lower())
def A__ ( SCREAMING_SNAKE_CASE__=False) -> List[str]:
with open(SCREAMING_SNAKE_CASE__ , encoding="""utf-8""") as f:
__snake_case: Optional[int] = yaml.safe_load(f.read())
# Get to the API doc
__snake_case: Dict = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__snake_case: str = content[api_idx]["""sections"""]
# Then to the model doc
__snake_case: List[Any] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
__snake_case: Dict = api_doc[model_idx]["""sections"""]
__snake_case: int = [(idx, section) for idx, section in enumerate(SCREAMING_SNAKE_CASE__) if """sections""" in section]
__snake_case: Optional[int] = False
for idx, modality_doc in modalities_docs:
__snake_case: Dict = modality_doc["""sections"""]
__snake_case: List[str] = clean_model_doc_toc(SCREAMING_SNAKE_CASE__)
if old_modality_doc != new_modality_doc:
__snake_case: List[str] = True
if overwrite:
__snake_case: Dict = new_modality_doc
if diff:
if overwrite:
__snake_case: Dict = model_doc
__snake_case: int = api_doc
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""") as f:
f.write(yaml.dump(SCREAMING_SNAKE_CASE__ , allow_unicode=SCREAMING_SNAKE_CASE__))
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""")
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__UpperCAmelCase : str = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
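# Illustrative toy input for the de-duplication rule above (upstream name:
# `clean_model_doc_toc`): entries sharing a "local" key collapse into one as long
# as their titles agree, and the result is sorted by title, case-insensitively.
#
#   [
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/albert", "title": "ALBERT"},
#   ]
#   # --> [{"local": "model_doc/albert", "title": "ALBERT"},
#   #      {"local": "model_doc/bert", "title": "BERT"}]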
| 293
| 0
|
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Any ):
__snake_case: Union[str, Any] = """laion/clap-htsat-unfused"""
__snake_case: Any = tempfile.mkdtemp()
def UpperCAmelCase__ ( self : Optional[int] , **A : Optional[int] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **A )
def UpperCAmelCase__ ( self : List[str] , **A : Optional[int] ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **A )
def UpperCAmelCase__ ( self : Optional[int] ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: int = self.get_tokenizer()
__snake_case: Tuple = self.get_feature_extractor()
__snake_case: Union[str, Any] = ClapProcessor(tokenizer=A , feature_extractor=A )
processor.save_pretrained(self.tmpdirname )
__snake_case: int = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Union[str, Any] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__snake_case: List[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__snake_case: Any = self.get_feature_extractor(do_normalize=A , padding_value=1.0 )
__snake_case: Optional[Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Dict = self.get_feature_extractor()
__snake_case: int = self.get_tokenizer()
__snake_case: Any = ClapProcessor(tokenizer=A , feature_extractor=A )
__snake_case: Optional[int] = floats_list((3, 1_000) )
__snake_case: Tuple = feature_extractor(A , return_tensors="""np""" )
__snake_case: List[str] = processor(audios=A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: List[str] = self.get_feature_extractor()
__snake_case: List[Any] = self.get_tokenizer()
__snake_case: Optional[Any] = ClapProcessor(tokenizer=A , feature_extractor=A )
__snake_case: Dict = """This is a test string"""
__snake_case: Tuple = processor(text=A )
__snake_case: Optional[int] = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: str = self.get_feature_extractor()
__snake_case: int = self.get_tokenizer()
__snake_case: Dict = ClapProcessor(tokenizer=A , feature_extractor=A )
__snake_case: Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case: List[str] = processor.batch_decode(A )
__snake_case: Any = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Union[str, Any] = self.get_feature_extractor()
__snake_case: Optional[Any] = self.get_tokenizer()
__snake_case: List[str] = ClapProcessor(tokenizer=A , feature_extractor=A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 359
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def A__ ( SCREAMING_SNAKE_CASE__) -> list[list[float]]:
__snake_case: Any = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(SCREAMING_SNAKE_CASE__) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
# Calculate the determinant of the matrix
__snake_case: Tuple = float(
d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
if determinant == 0:
raise ValueError("""This matrix has no inverse.""")
# Creates a copy of the matrix with swapped positions of the elements
__snake_case: Optional[int] = [[0.0, 0.0], [0.0, 0.0]]
__snake_case , __snake_case: Optional[Any] = matrix[1][1], matrix[0][0]
__snake_case , __snake_case: Union[str, Any] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(SCREAMING_SNAKE_CASE__)) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(SCREAMING_SNAKE_CASE__) == 3
and len(matrix[0]) == 3
and len(matrix[1]) == 3
and len(matrix[2]) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
__snake_case: Any = float(
(
(d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
+ (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
+ (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
)
- (
(d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
+ (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
+ (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
))
if determinant == 0:
raise ValueError("""This matrix has no inverse.""")
# Creating cofactor matrix
__snake_case: Tuple = [
[d(0.0), d(0.0), d(0.0)],
[d(0.0), d(0.0), d(0.0)],
[d(0.0), d(0.0), d(0.0)],
]
__snake_case: Dict = (d(matrix[1][1]) * d(matrix[2][2])) - (
d(matrix[1][2]) * d(matrix[2][1])
)
__snake_case: Tuple = -(
(d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
)
__snake_case: Optional[int] = (d(matrix[1][0]) * d(matrix[2][1])) - (
d(matrix[1][1]) * d(matrix[2][0])
)
__snake_case: Union[str, Any] = -(
(d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
)
__snake_case: str = (d(matrix[0][0]) * d(matrix[2][2])) - (
d(matrix[0][2]) * d(matrix[2][0])
)
__snake_case: List[Any] = -(
(d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
)
__snake_case: Optional[Any] = (d(matrix[0][1]) * d(matrix[1][2])) - (
d(matrix[0][2]) * d(matrix[1][1])
)
__snake_case: List[str] = -(
(d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
)
__snake_case: Optional[int] = (d(matrix[0][0]) * d(matrix[1][1])) - (
d(matrix[0][1]) * d(matrix[1][0])
)
# Transpose the cofactor matrix (Adjoint matrix)
__snake_case: List[Any] = array(SCREAMING_SNAKE_CASE__)
for i in range(3):
for j in range(3):
__snake_case: Tuple = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
__snake_case: List[Any] = array(SCREAMING_SNAKE_CASE__)
for i in range(3):
for j in range(3):
inverse_matrix[i][j] /= d(SCREAMING_SNAKE_CASE__)
# Calculate the inverse of the matrix
return [[float(d(SCREAMING_SNAKE_CASE__)) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""")
| 293
| 0
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
__UpperCAmelCase : Optional[Any] = {"UserAgent": UserAgent().random}
def A__ ( SCREAMING_SNAKE_CASE__) -> dict:
__snake_case: int = script.contents[0]
__snake_case: int = json.loads(data[data.find("""{\"config\"""") : -1])
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """Crawl basic profile information for an Instagram user."""

    def __init__(self, username: str):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def A__ ( SCREAMING_SNAKE_CASE__ = "github") -> None:
import os
if os.environ.get("""CI"""):
return # test failing on GitHub Actions
__snake_case: int = InstagramUser(SCREAMING_SNAKE_CASE__)
assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 12_0000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""")
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(f'{instagram_user.number_of_posts = }')
print(f'{instagram_user.number_of_followers = }')
print(f'{instagram_user.number_of_followings = }')
print(f'{instagram_user.email = }')
print(f'{instagram_user.website = }')
print(f'{instagram_user.profile_picture_url = }')
print(f'{instagram_user.is_verified = }')
print(f'{instagram_user.is_private = }')
| 360
|
import math
def proth(number: int) -> int:
    """Return the nth Proth number (3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # After the seeds 3 and 5, Proth numbers arrive in blocks whose size
        # doubles: block b contributes `increment` values of 2**(b + 1) + previous.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(f'ValueError: there is no {number}th Proth number')
continue
print(f'The {number}th Proth number: {value}')
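    # Brute-force cross-check sketch: a Proth number is k * 2**n + 1 with k odd
    # and k < 2**n, so filtering small integers with that test should agree with
    # the generator above (3, 5, 9, 13, 17, 25, 33, ...).
    def is_proth(m: int) -> bool:
        k, n = m - 1, 0
        while k % 2 == 0:
            k, n = k // 2, n + 1
        return n > 0 and k < 2**n

    assert [m for m in range(3, 40) if is_proth(m)] == [3, 5, 9, 13, 17, 25, 33]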
| 293
| 0
|
__UpperCAmelCase : Union[str, Any] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
__UpperCAmelCase : Optional[int] = ["a", "b", "c", "d", "e"]
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> int:
__snake_case: Optional[int] = start
# add current to visited
visited.append(SCREAMING_SNAKE_CASE__)
__snake_case: str = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__snake_case: Tuple = topological_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# if all neighbors visited add current to sort
sort.append(SCREAMING_SNAKE_CASE__)
# if all vertices haven't been visited select a new one to visit
if len(SCREAMING_SNAKE_CASE__) != len(SCREAMING_SNAKE_CASE__):
for vertice in vertices:
if vertice not in visited:
__snake_case: int = topological_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# return sort
return sort
if __name__ == "__main__":
__UpperCAmelCase : Dict = topological_sort("a", [], [])
print(sort)
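    # Sanity-check sketch: this DFS appends a vertex only after all of its
    # children, so in the returned order every parent appears after each child.
    position = {v: i for i, v in enumerate(sort)}
    for parent, children in edges.items():
        assert all(position[child] < position[parent] for child in children)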
| 361
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = 42
class __snake_case ( __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = True
@register_to_config
def __init__( self : Union[str, Any] , A : int = 3 , A : int = 3 , A : Tuple[str] = ("DownEncoderBlock2D",) , A : Tuple[str] = ("UpDecoderBlock2D",) , A : Tuple[int] = (64,) , A : int = 1 , A : str = "silu" , A : int = 4 , A : int = 32 , A : int = 32 , A : float = 0.1_8215 , ):
super().__init__()
# pass init params to Encoder
__snake_case: Any = Encoder(
in_channels=A , out_channels=A , down_block_types=A , block_out_channels=A , layers_per_block=A , act_fn=A , norm_num_groups=A , double_z=A , )
# pass init params to Decoder
__snake_case: int = Decoder(
in_channels=A , out_channels=A , up_block_types=A , block_out_channels=A , layers_per_block=A , norm_num_groups=A , act_fn=A , )
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Any = {}
def fn_recursive_add_processors(A : str , A : torch.nn.Module , A : Dict[str, AttentionProcessor] ):
if hasattr(A , """set_processor""" ):
__snake_case: List[Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , A , A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(A , A , A )
return processors
def UpperCAmelCase__ ( self : Optional[int] , A : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
__snake_case: Any = len(self.attn_processors.keys() )
if isinstance(A , A ) and len(A ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(A )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(A : str , A : torch.nn.Module , A : Optional[Any] ):
if hasattr(A , """set_processor""" ):
if not isinstance(A , A ):
module.set_processor(A )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , A , A )
for name, module in self.named_children():
fn_recursive_attn_processor(A , A , A )
def UpperCAmelCase__ ( self : List[str] ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def UpperCAmelCase__ ( self : Optional[Any] , A : torch.FloatTensor , A : bool = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(A , return_dict=A )
if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=A )
def UpperCAmelCase__ ( self : Tuple , A : torch.FloatTensor , A : bool = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(A , return_dict=A )
__snake_case: Optional[int] = self.post_quant_conv(A )
__snake_case: Union[str, Any] = self.decoder(A )
if not return_dict:
return (dec,)
return DecoderOutput(sample=A )
@apply_forward_hook
def UpperCAmelCase__ ( self : Tuple , A : torch.FloatTensor , A : bool = True ):
if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=A )
    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
def UpperCAmelCase__ ( self : int , A : torch.FloatTensor , A : bool = True ):
__snake_case: List[str] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__snake_case: Dict = int(self.tile_latent_min_size * self.tile_overlap_factor )
__snake_case: Dict = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__snake_case: Optional[int] = []
for i in range(0 , x.shape[2] , A ):
__snake_case: Optional[int] = []
for j in range(0 , x.shape[3] , A ):
__snake_case: int = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__snake_case: Tuple = self.encoder(A )
__snake_case: Dict = self.quant_conv(A )
row.append(A )
rows.append(A )
__snake_case: Tuple = []
for i, row in enumerate(A ):
__snake_case: str = []
for j, tile in enumerate(A ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__snake_case: Optional[Any] = self.blend_v(rows[i - 1][j] , A , A )
if j > 0:
__snake_case: Tuple = self.blend_h(row[j - 1] , A , A )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(A , dim=3 ) )
__snake_case: Tuple = torch.cat(A , dim=2 )
__snake_case: Optional[int] = DiagonalGaussianDistribution(A )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=A )
def UpperCAmelCase__ ( self : Union[str, Any] , A : torch.FloatTensor , A : bool = True ):
__snake_case: Optional[Any] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__snake_case: str = int(self.tile_sample_min_size * self.tile_overlap_factor )
__snake_case: int = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__snake_case: List[Any] = []
for i in range(0 , z.shape[2] , A ):
__snake_case: Optional[Any] = []
for j in range(0 , z.shape[3] , A ):
__snake_case: Dict = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__snake_case: Any = self.post_quant_conv(A )
__snake_case: Optional[Any] = self.decoder(A )
row.append(A )
rows.append(A )
__snake_case: Optional[Any] = []
for i, row in enumerate(A ):
__snake_case: Optional[Any] = []
for j, tile in enumerate(A ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__snake_case: Tuple = self.blend_v(rows[i - 1][j] , A , A )
if j > 0:
__snake_case: List[str] = self.blend_h(row[j - 1] , A , A )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(A , dim=3 ) )
__snake_case: Dict = torch.cat(A , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=A )
def UpperCAmelCase__ ( self : List[Any] , A : torch.FloatTensor , A : bool = False , A : bool = True , A : Optional[torch.Generator] = None , ):
__snake_case: Optional[Any] = sample
__snake_case: Union[str, Any] = self.encode(A ).latent_dist
if sample_posterior:
__snake_case: Optional[Any] = posterior.sample(generator=A )
else:
__snake_case: Dict = posterior.mode()
__snake_case: Any = self.decode(A ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=A )
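# Toy illustration of blend_v/blend_h above (a minimal sketch): across the
# overlap, tile `b` is linearly faded in over tile `a`, which is what hides
# the seams between independently decoded tiles.
if __name__ == "__main__":
    a, b, blend_extent = torch.zeros(1, 1, 1, 4), torch.ones(1, 1, 1, 4), 4
    for x in range(blend_extent):
        b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
    print(b)  # tensor([[[[0.0000, 0.2500, 0.5000, 0.7500]]]])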
| 293
| 0
|
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    """
    Arguments for plotting benchmark csv results.
    """

    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string: str) -> bool:
    try:
        int(string)
        return True
    except ValueError:
        return False
def can_convert_to_float(string: str) -> bool:
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

                title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main() -> None:
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
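# Input format sketch (inferred from the DictReader fields above): one row per
# (model, batch_size, sequence_length) measurement, e.g.
#
#     model,batch_size,sequence_length,result
#     bert-base-uncased,8,128,1419
#     bert-base-uncased,8,512,5241
#
# and an assumed invocation such as:
#     python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png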
| 362
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : Union[str, Any] = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = """sew-d"""
def __init__( self : Dict , A : Any=32 , A : Dict=768 , A : Optional[Any]=12 , A : Union[str, Any]=12 , A : Union[str, Any]=3_072 , A : Optional[Any]=2 , A : Union[str, Any]=512 , A : List[Any]=256 , A : Dict=True , A : Union[str, Any]=True , A : Optional[int]=("p2c", "c2p") , A : str="layer_norm" , A : Dict="gelu_python" , A : Tuple=0.1 , A : Any=0.1 , A : Tuple=0.1 , A : Optional[int]=0.0 , A : Any=0.1 , A : Any=0.02 , A : Dict=1E-7 , A : str=1E-5 , A : int="group" , A : int="gelu" , A : str=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , A : Union[str, Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A : List[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A : Optional[int]=False , A : int=128 , A : int=16 , A : Optional[Any]=True , A : List[Any]=0.05 , A : Any=10 , A : Dict=2 , A : List[Any]=0.0 , A : Union[str, Any]=10 , A : int=0 , A : List[Any]="mean" , A : Union[str, Any]=False , A : Any=False , A : Optional[int]=256 , A : List[Any]=0 , A : Any=1 , A : List[Any]=2 , **A : List[Any] , ):
super().__init__(**A , pad_token_id=A , bos_token_id=A , eos_token_id=A )
__snake_case: Optional[int] = hidden_size
__snake_case: str = feat_extract_norm
__snake_case: int = feat_extract_activation
__snake_case: str = list(A )
__snake_case: Any = list(A )
__snake_case: str = list(A )
__snake_case: Union[str, Any] = conv_bias
__snake_case: int = num_conv_pos_embeddings
__snake_case: str = num_conv_pos_embedding_groups
__snake_case: List[Any] = len(self.conv_dim )
__snake_case: List[str] = num_hidden_layers
__snake_case: Union[str, Any] = intermediate_size
__snake_case: Dict = squeeze_factor
__snake_case: List[Any] = max_position_embeddings
__snake_case: List[Any] = position_buckets
__snake_case: List[str] = share_att_key
__snake_case: int = relative_attention
__snake_case: Union[str, Any] = norm_rel_ebd
__snake_case: List[str] = list(A )
__snake_case: Tuple = hidden_act
__snake_case: List[Any] = num_attention_heads
__snake_case: str = hidden_dropout
__snake_case: int = attention_dropout
__snake_case: Dict = activation_dropout
__snake_case: Any = feat_proj_dropout
__snake_case: int = final_dropout
__snake_case: List[Any] = layer_norm_eps
__snake_case: List[str] = feature_layer_norm_eps
__snake_case: List[Any] = initializer_range
__snake_case: List[Any] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case: List[Any] = apply_spec_augment
__snake_case: List[Any] = mask_time_prob
__snake_case: str = mask_time_length
__snake_case: List[str] = mask_time_min_masks
__snake_case: str = mask_feature_prob
__snake_case: Optional[int] = mask_feature_length
__snake_case: Dict = mask_feature_min_masks
# ctc loss
__snake_case: Any = ctc_loss_reduction
__snake_case: str = ctc_zero_infinity
# sequence classification
__snake_case: Optional[Any] = use_weighted_layer_sum
__snake_case: List[Any] = classifier_proj_size
@property
def UpperCAmelCase__ ( self : int ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
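# Worked example for the property above (a sketch): the default strides
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) multiply out to 5 * 2**6 = 320,
# i.e. one feature frame per 320 input samples (20 ms of audio at 16 kHz).
if __name__ == "__main__":
    strides = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
    assert functools.reduce(operator.mul, strides, 1) == 320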
| 293
| 0
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build and simulate the n-qubit quantum Fourier transform circuit."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be an exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits

    for i in range(counter):
        # apply a Hadamard, then the cascade of controlled phase rotations
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
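    # What to expect (a sketch): applied to the all-zeros initial state, the QFT
    # yields a uniform superposition, so the 10000 shots spread roughly evenly
    # (~1250 each) over all 2**3 = 8 bitstrings.
    counts = quantum_fourier_transform(3)
    assert sum(counts.values()) == 10000 and len(counts) == 8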
| 363
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__UpperCAmelCase : Any = random.Random()
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None) -> Any:
if rng is None:
__snake_case: Dict = global_rng
__snake_case: str = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , A : List[str] , A : List[Any]=7 , A : Optional[int]=400 , A : List[Any]=2_000 , A : Dict=2_048 , A : Tuple=128 , A : List[Any]=1 , A : Tuple=512 , A : str=30 , A : Optional[Any]=44_100 , ):
__snake_case: Dict = parent
__snake_case: Optional[Any] = batch_size
__snake_case: Optional[int] = min_seq_length
__snake_case: Optional[Any] = max_seq_length
__snake_case: List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__snake_case: Any = spectrogram_length
__snake_case: Any = feature_size
__snake_case: Union[str, Any] = num_audio_channels
__snake_case: Any = hop_length
__snake_case: List[str] = chunk_length
__snake_case: Any = sampling_rate
def UpperCAmelCase__ ( self : List[Any] ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCAmelCase__ ( self : List[str] , A : str=False , A : int=False ):
def _flatten(A : Dict ):
return list(itertools.chain(*A ) )
if equal_length:
__snake_case: List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__snake_case: int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__snake_case: Tuple = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = TvltFeatureExtractor
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: str = TvltFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : int ):
__snake_case: Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A , """spectrogram_length""" ) )
self.assertTrue(hasattr(A , """feature_size""" ) )
self.assertTrue(hasattr(A , """num_audio_channels""" ) )
self.assertTrue(hasattr(A , """hop_length""" ) )
self.assertTrue(hasattr(A , """chunk_length""" ) )
self.assertTrue(hasattr(A , """sampling_rate""" ) )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: Tuple = feat_extract_first.save_pretrained(A )[0]
check_json_file_has_correct_format(A )
__snake_case: int = self.feature_extraction_class.from_pretrained(A )
__snake_case: List[str] = feat_extract_first.to_dict()
__snake_case: str = feat_extract_second.to_dict()
__snake_case: List[Any] = dict_first.pop("""mel_filters""" )
__snake_case: str = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: str = os.path.join(A , """feat_extract.json""" )
feat_extract_first.to_json_file(A )
__snake_case: List[Any] = self.feature_extraction_class.from_json_file(A )
__snake_case: Dict = feat_extract_first.to_dict()
__snake_case: Any = feat_extract_second.to_dict()
__snake_case: int = dict_first.pop("""mel_filters""" )
__snake_case: int = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : Any ):
# Initialize feature_extractor
__snake_case: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__snake_case: Dict = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__snake_case: str = [np.asarray(A ) for speech_input in speech_inputs]
# Test not batched input
__snake_case: int = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__snake_case: Optional[int] = feature_extractor(A , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__snake_case: Union[str, Any] = feature_extractor(
A , return_tensors="""np""" , sampling_rate=44_100 , mask_audio=A ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__snake_case: Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__snake_case: Union[str, Any] = np.asarray(A )
__snake_case: List[Any] = feature_extractor(A , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def UpperCAmelCase__ ( self : Union[str, Any] , A : List[str] ):
__snake_case: Tuple = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__snake_case: List[Any] = ds.sort("""id""" ).select(range(A ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = self._load_datasamples(1 )
__snake_case: Optional[int] = TvltFeatureExtractor()
__snake_case: Optional[Any] = feature_extractor(A , return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
__snake_case: str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , A , atol=1E-4 ) )
| 293
| 0
|
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def carrier_concentration(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0) != 1:
raise ValueError("""You cannot supply more or less than 2 values""")
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""")
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""")
elif mobility < 0:
raise ValueError("""mobility cannot be negative""")
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
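    # Worked example (a sketch): sigma = n * e * mu, so with
    # electron_conc = 1e20 m^-3 and mobility = 0.35 m^2/(V*s) the missing
    # conductivity is 1e20 * 1.6021e-19 * 0.35 ~= 5.607 S/m.
    name, value = carrier_concentration(conductivity=0, electron_conc=1e20, mobility=0.35)
    assert name == "conductivity" and abs(value - 5.60735) < 1e-4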
| 364
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
    def __init__( self : List[Any] , A : AutoencoderKL , A : CLIPTextModel , A : CLIPTokenizer , A : UNet2DConditionModel , A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , A : StableDiffusionSafetyChecker , A : CLIPImageProcessor , ):
super().__init__()
self.register_modules(
vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , )
def UpperCAmelCase__ ( self : Optional[Any] , A : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case: Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def UpperCAmelCase__ ( self : str ):
self.enable_attention_slicing(A )
@torch.no_grad()
def __call__( self : List[str] , A : Union[str, List[str]] , A : int = 512 , A : int = 512 , A : int = 50 , A : float = 7.5 , A : Optional[Union[str, List[str]]] = None , A : Optional[int] = 1 , A : float = 0.0 , A : Optional[torch.Generator] = None , A : Optional[torch.FloatTensor] = None , A : Optional[str] = "pil" , A : bool = True , A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A : int = 1 , A : Optional[torch.FloatTensor] = None , **A : Optional[Any] , ):
if isinstance(A , A ):
__snake_case: int = 1
elif isinstance(A , A ):
__snake_case: Optional[Any] = len(A )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(A )}.''' )
# get prompt text embeddings
__snake_case: Tuple = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case: Any = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case: List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case: Dict = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__snake_case: Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case: List[Any] = text_embeddings.shape
__snake_case: Tuple = text_embeddings.repeat(1 , A , 1 )
__snake_case: Dict = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case: List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case: List[str]
if negative_prompt is None:
__snake_case: Any = [""""""]
elif type(A ) is not type(A ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !='''
f''' {type(A )}.''' )
elif isinstance(A , A ):
__snake_case: List[str] = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case: str = negative_prompt
__snake_case: Any = text_input_ids.shape[-1]
__snake_case: Dict = self.tokenizer(
A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , )
__snake_case: Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case: Optional[Any] = uncond_embeddings.shape[1]
__snake_case: str = uncond_embeddings.repeat(A , A , 1 )
__snake_case: List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case: Any = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__snake_case: Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case: Any = torch.randn(
A , generator=A , device="""cpu""" , dtype=A ).to(self.device )
__snake_case: Tuple = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
__snake_case: Dict = torch.randn(
A , generator=A , device=self.device , dtype=A )
__snake_case: Optional[int] = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case: Optional[int] = latents_reference.to(self.device )
__snake_case: List[str] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__snake_case: int = (latents_shape[3] - latents_shape_reference[3]) // 2
__snake_case: Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
__snake_case: int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__snake_case: Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__snake_case: List[Any] = 0 if dx < 0 else dx
__snake_case: Dict = 0 if dy < 0 else dy
__snake_case: List[str] = max(-dx , 0 )
__snake_case: int = max(-dy , 0 )
# import pdb
# pdb.set_trace()
__snake_case: List[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case: str = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case: Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case: Optional[int] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case: int = {}
if accepts_eta:
__snake_case: Optional[Any] = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
__snake_case: str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case: Dict = self.scheduler.scale_model_input(A , A )
# predict the noise residual
__snake_case: List[Any] = self.unet(A , A , encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case: Any = noise_pred.chunk(2 )
__snake_case: Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case: str = self.scheduler.step(A , A , A , **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
__snake_case: Optional[int] = 1 / 0.1_8215 * latents
__snake_case: List[Any] = self.vae.decode(A ).sample
__snake_case: str = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case: Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__snake_case: List[Any] = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to(
self.device )
__snake_case , __snake_case: List[str] = self.safety_checker(
images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__snake_case: Optional[int] = None
if output_type == "pil":
__snake_case: Tuple = self.numpy_to_pil(A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
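# Classifier-free guidance in isolation (a minimal sketch of the update used
# above): the final noise estimate extrapolates from the unconditional
# prediction toward the text-conditioned one by `guidance_scale`.
if __name__ == "__main__":
    uncond, text, guidance_scale = torch.zeros(2, 4), torch.ones(2, 4), 7.5
    noise_pred = uncond + guidance_scale * (text - uncond)
    assert torch.allclose(noise_pred, torch.full((2, 4), 7.5))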
| 293
| 0
|
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 365
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__UpperCAmelCase : Optional[int] = "\\n\n"
__UpperCAmelCase : Tuple = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
__UpperCAmelCase : Tuple = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 293
| 0
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 366
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase : List[str] = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[Any] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Tuple = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[int] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 293
| 0
|
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent( user_agent = None) -> str:
    ua = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += F'''; torch/{_torch_version}'''
    if is_flax_available():
        ua += F'''; jax/{_jax_version}'''
        ua += F'''; flax/{_flax_version}'''
    if is_onnx_available():
        ua += F'''; onnxruntime/{_onnxruntime_version}'''
    # CI will set this value to True
    if os.environ.get("""DIFFUSERS_IS_CI""" , """""").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent , dict):
        ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items())
    elif isinstance(user_agent , str):
        ua += "; " + user_agent
    return ua
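# Illustrative only (exact values depend on the environment): the returned string looks like
# "diffusers/<version>; python/<version>; session_id/<hex>; torch/<version>"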
def get_full_repo_name( model_id , organization = None , token = None) -> str:
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["""name"""]
        return F'''{username}/{model_id}'''
    else:
        return F'''{organization}/{model_id}'''
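# Illustrative usage with hypothetical names: get_full_repo_name("""my-model""" , organization="""my-org""")
# returns "my-org/my-model"; without an organization, the username is resolved from the token.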
def create_model_card( args , model_name) -> None:
if not is_jinja_available():
raise ValueError(
"""Modelcard rendering is based on Jinja templates."""
""" Please make sure to have `jinja` installed before using `create_model_card`."""
""" To install it, please run `pip install Jinja2`.""")
if hasattr(SCREAMING_SNAKE_CASE__ , """local_rank""") and args.local_rank not in [-1, 0]:
return
__snake_case: Tuple = args.hub_token if hasattr(SCREAMING_SNAKE_CASE__ , """hub_token""") else None
__snake_case: Optional[Any] = get_full_repo_name(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__)
__snake_case: Tuple = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="""en""" , license="""apache-2.0""" , library_name="""diffusers""" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=SCREAMING_SNAKE_CASE__ , model_name=SCREAMING_SNAKE_CASE__ , repo_name=SCREAMING_SNAKE_CASE__ , dataset_name=args.dataset_name if hasattr(SCREAMING_SNAKE_CASE__ , """dataset_name""") else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(SCREAMING_SNAKE_CASE__ , """gradient_accumulation_steps""") else None
) , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , """adam_beta1""") else None , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , """adam_beta2""") else None , adam_weight_decay=args.adam_weight_decay if hasattr(SCREAMING_SNAKE_CASE__ , """adam_weight_decay""") else None , adam_epsilon=args.adam_epsilon if hasattr(SCREAMING_SNAKE_CASE__ , """adam_epsilon""") else None , lr_scheduler=args.lr_scheduler if hasattr(SCREAMING_SNAKE_CASE__ , """lr_scheduler""") else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(SCREAMING_SNAKE_CASE__ , """lr_warmup_steps""") else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(SCREAMING_SNAKE_CASE__ , """ema_inv_gamma""") else None , ema_power=args.ema_power if hasattr(SCREAMING_SNAKE_CASE__ , """ema_power""") else None , ema_max_decay=args.ema_max_decay if hasattr(SCREAMING_SNAKE_CASE__ , """ema_max_decay""") else None , mixed_precision=args.mixed_precision , )
__snake_case: str = os.path.join(args.output_dir , """README.md""")
model_card.save(SCREAMING_SNAKE_CASE__)
def extract_commit_hash( resolved_file , commit_hash = None) -> Optional[str]:
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"""snapshots/([^/]+)/""" , resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
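# Illustrative (hypothetical path): a resolved file such as
# ".../models--user--repo/snapshots/0123abcd.../config.json" yields "0123abcd..." when the
# captured group matches REGEX_COMMIT_HASH; otherwise None is returned.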
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache( old_cache_dir = None , new_cache_dir = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("""**/blobs/*"""):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True , exist_ok=True)
            os.replace(old_blob_path , new_blob_path)
            try:
                os.symlink(new_blob_path , old_blob_path)
            except OSError:
                logger.warning(
                    """Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.""")
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
"existing cached models. This is a one-time operation, you can interrupt it or run it "
"later by calling `diffusers.utils.hub_utils.move_cache()`."
)
try:
move_cache()
except Exception as e:
__UpperCAmelCase : List[Any] = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
"file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
"message and we will do our best to help."
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, "w") as f:
f.write("1")
except Exception:
logger.warning(
f'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
"the directory exists and can be written to."
)
def _add_variant( weights_name , variant = None) -> str:
    if variant is not None:
        splits = weights_name.split(""".""")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = """.""".join(splits)
    return weights_name
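# Illustrative: _add_variant("""diffusion_pytorch_model.bin""" , """fp16""") returns
# "diffusion_pytorch_model.fp16.bin"; with variant=None the name is returned unchanged.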
def _get_model_file( pretrained_model_name_or_path , *,
    weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , ) -> str:
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name)):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name)
            return model_file
else:
raise EnvironmentError(
F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''')
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("""0.20.0""")
):
try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , FutureWarning , )
return model_file
except: # noqa: E722
                warnings.warn(
                    F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision)} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision)}\' so that the correct variant file can be added.''' , FutureWarning , )
try:
# 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
"""listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""")
except RevisionNotFoundError:
raise EnvironmentError(
F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
"""this model name. Check the model page at """
F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''')
except EntryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''')
except HTTPError as err:
raise EnvironmentError(
F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''')
except ValueError:
raise EnvironmentError(
F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
F''' directory containing a file named {weights_name} or'''
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""")
except EnvironmentError:
raise EnvironmentError(
F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
"""'https://huggingface.co/models', make sure you don't have a local directory with the same name. """
F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
F'''containing a file named {weights_name}''')
| 367
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester( ConfigTester ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(A , """neck_hidden_sizes""" ) )
self.parent.assertTrue(hasattr(A , """num_attention_heads""" ) )
class MobileViTModelTester:
'''simple docstring'''
def __init__( self : int , A : str , A : Dict=13 , A : str=32 , A : Any=2 , A : Optional[Any]=3 , A : str=640 , A : Tuple=4 , A : Dict="silu" , A : List[Any]=3 , A : Any=32 , A : Any=0.1 , A : int=0.1 , A : Dict=0.1 , A : Optional[Any]=0.02 , A : List[Any]=True , A : Tuple=True , A : Any=10 , A : Optional[int]=None , ):
__snake_case: List[Any] = parent
__snake_case: Dict = batch_size
__snake_case: int = image_size
__snake_case: Tuple = patch_size
__snake_case: Tuple = num_channels
__snake_case: str = last_hidden_size
__snake_case: Dict = num_attention_heads
__snake_case: Dict = hidden_act
__snake_case: Tuple = conv_kernel_size
__snake_case: List[str] = output_stride
__snake_case: List[str] = hidden_dropout_prob
__snake_case: Optional[Any] = attention_probs_dropout_prob
__snake_case: int = classifier_dropout_prob
__snake_case: List[Any] = use_labels
__snake_case: Union[str, Any] = is_training
__snake_case: Union[str, Any] = num_labels
__snake_case: str = initializer_range
__snake_case: List[Any] = scope
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case: Tuple = None
__snake_case: Any = None
if self.use_labels:
__snake_case: Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case: str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case: Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase__ ( self : int ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : str , A : Optional[Any] , A : Any , A : Any , A : Union[str, Any] ):
__snake_case: List[Any] = MobileViTModel(config=A )
model.to(A )
model.eval()
__snake_case: int = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : str , A : List[Any] , A : Any , A : Any , A : int ):
__snake_case: str = self.num_labels
__snake_case: Optional[int] = MobileViTForImageClassification(A )
model.to(A )
model.eval()
__snake_case: Union[str, Any] = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[int] , A : str , A : Optional[Any] , A : int , A : str ):
__snake_case: List[Any] = self.num_labels
__snake_case: Dict = MobileViTForSemanticSegmentation(A )
model.to(A )
model.eval()
__snake_case: Union[str, Any] = model(A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case: Tuple = model(A , labels=A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Tuple = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case: Any = config_and_inputs
__snake_case: Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[Any] = MobileViTModelTester(self )
__snake_case: str = MobileViTConfigTester(self , config_class=A , has_text_modality=A )
def UpperCAmelCase__ ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : List[Any] ):
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def UpperCAmelCase__ ( self : Dict ):
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def UpperCAmelCase__ ( self : Optional[Any] ):
pass
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case: Optional[Any] = model_class(A )
__snake_case: int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case: Optional[int] = [*signature.parameters.keys()]
__snake_case: List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase__ ( self : Dict ):
def check_hidden_states_output(A : List[Any] , A : int , A : Tuple ):
__snake_case: List[str] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
__snake_case: str = model(**self._prepare_for_class(A , A ) )
__snake_case: Optional[int] = outputs.hidden_states
__snake_case: Any = 5
self.assertEqual(len(A ) , A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case: Union[str, Any] = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case: Optional[Any] = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case: Dict = True
check_hidden_states_output(A , A , A )
def UpperCAmelCase__ ( self : int ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case: List[Any] = MobileViTModel.from_pretrained(A )
self.assertIsNotNone(A )
def A__ ( ) -> Optional[int]:
__snake_case: Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase__ ( self : Dict ):
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Tuple = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(A )
__snake_case: str = self.default_image_processor
__snake_case: Optional[Any] = prepare_img()
__snake_case: List[Any] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: Dict = model(**A )
# verify the logits
__snake_case: List[str] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , A )
__snake_case: Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[str] = model.to(A )
__snake_case: Dict = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[Any] = prepare_img()
__snake_case: List[str] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: List[Any] = model(**A )
__snake_case: Optional[int] = outputs.logits
# verify the logits
__snake_case: Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , A )
__snake_case: Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Dict ):
__snake_case: int = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: str = model.to(A )
__snake_case: Optional[Any] = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[str] = prepare_img()
__snake_case: Optional[int] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: Dict = model(**A )
__snake_case: List[Any] = outputs.logits.detach().cpu()
__snake_case: List[str] = image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(50, 60)] )
__snake_case: str = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , A )
__snake_case: int = image_processor.post_process_semantic_segmentation(outputs=A )
__snake_case: Tuple = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , A )
| 293
| 0
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
__UpperCAmelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
@lru_cache()
def bytes_to_unicode() -> dict:
    bs = (
        list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs , cs))
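# Consequence of the loop above (illustrative): printable bytes map to themselves,
# e.g. ord("!") -> "!", while the excluded bytes are shifted past U+00FF, e.g. byte 0 -> chr(256),
# so no key of the resulting vocabulary is whitespace or a control character.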
def get_pairs(word) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
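# Example: get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}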
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , merges_file , errors="""replace""" , bos_token="""<s>""" , eos_token="""</s>""" , sep_token="""</s>""" , cls_token="""<s>""" , unk_token="""<unk>""" , pad_token="""<pad>""" , mask_token="""<mask>""" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="""utf-8""" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="""utf-8""" ) as merges_handle:
            bpe_merges = merges_handle.read().split("""\n""" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def UpperCAmelCase__ ( self : List[str] ):
return len(self.encoder )
def UpperCAmelCase__ ( self : List[str] ):
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """ """.join(word )
        self.cache[token] = word
        return word
def UpperCAmelCase__ ( self : Union[str, Any] , A : List[Any] ):
        bpe_tokens = []
        for token in re.findall(self.pat , A ):
            token = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(""" """ ) )
return bpe_tokens
def UpperCAmelCase__ ( self : Union[str, Any] , A : int ):
return self.encoder.get(A , self.encoder.get(self.unk_token ) )
def UpperCAmelCase__ ( self : str , A : Any ):
return self.decoder.get(A )
def UpperCAmelCase__ ( self : Optional[int] , A : str ):
__snake_case: Optional[Any] = """""".join(A )
__snake_case: Any = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
    def UpperCAmelCase__ ( self , save_directory : str , filename_prefix : Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
index += 1
return vocab_file, merge_file
    def UpperCAmelCase__ ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
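    # Note: the method above follows the BART/RoBERTa convention, producing
    # `<s> A </s>` for a single sequence and `<s> A </s></s> B </s>` for a pair.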
    def UpperCAmelCase__ ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def UpperCAmelCase__ ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
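    # Note: BART does not make use of token type ids, so the method above returns a list of
    # zeros of the appropriate length whether or not a second sequence is passed.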
    def UpperCAmelCase__ ( self , text : str , is_split_into_words : bool = False , **kwargs ):
        add_prefix_space = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = """ """ + text
        return (text, kwargs)
| 368
|
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: str = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetDownsampleBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Union[str, Any] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: Union[str, Any] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : List[str] ):
__snake_case , __snake_case: List[str] = super().prepare_init_args_and_inputs_for_common()
__snake_case: List[Any] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : int ):
__snake_case , __snake_case: Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Optional[Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Any ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[Any] ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : int ):
__snake_case: str = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: str = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[str] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Tuple = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaD # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
__snake_case: List[str] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Tuple = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: int = super().prepare_init_args_and_inputs_for_common()
__snake_case: int = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetUpsampleBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[int] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[Any] = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A , include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Optional[Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : int ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Optional[Any] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : str ):
__snake_case: Union[str, Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Any = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : int ):
__snake_case: Any = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(A )
| 293
| 0
|
import numpy as np
def A__ ( vector: np.array) -> np.array:
return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase__ ( self : Dict ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = ort.SessionOptions()
__snake_case: List[Any] = False
return options
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__snake_case: Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__snake_case: List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
__snake_case: int = """A red cat sitting on a park bench"""
__snake_case: Any = np.random.RandomState(0 )
__snake_case: Optional[Any] = pipe(
prompt=A , image=A , mask_image=A , guidance_scale=7.5 , num_inference_steps=10 , generator=A , output_type="""np""" , )
__snake_case: List[Any] = output.images
__snake_case: str = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__snake_case: Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__snake_case: Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__snake_case: Optional[int] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
__snake_case: List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=A , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
__snake_case: Optional[int] = """A red cat sitting on a park bench"""
__snake_case: Dict = np.random.RandomState(0 )
__snake_case: Optional[Any] = pipe(
prompt=A , image=A , mask_image=A , guidance_scale=7.5 , num_inference_steps=20 , generator=A , output_type="""np""" , )
__snake_case: List[str] = output.images
__snake_case: str = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__snake_case: Union[str, Any] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 293
| 0
|
def solution( n = 100) -> int:
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
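# Sanity check against brute force for small n (illustrative): solution(10) == 2640
# == sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11)), since the sum of the first
# n cubes equals the square of the n-th triangular number.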
if __name__ == "__main__":
print(f'{solution() = }')
| 370
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform( number_of_qubits = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits , str):
        raise TypeError("""number of qubits must be an integer.""")
    if number_of_qubits <= 0:
        raise ValueError("""number of qubits must be > 0.""")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("""number of qubits must be exact integer.""")
    if number_of_qubits > 10:
        raise ValueError("""number of qubits too large to simulate (>10).""")
    qr = QuantumRegister(number_of_qubits , """qr""")
    cr = ClassicalRegister(number_of_qubits , """cr""")
    quantum_circuit = QuantumCircuit(qr , cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k , number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr , cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("""qasm_simulator""")
    job = execute(quantum_circuit , backend , shots=10_000)
    return job.result().get_counts(quantum_circuit)
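# Applied to the all-zero register, the QFT produces a uniform superposition, so the
# 10_000 shots should be spread roughly evenly over all 2**number_of_qubits bitstrings.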
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
| 293
| 0
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Union[str, Any] , A : str ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
__snake_case: str = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(A )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[str] = """sshleifer/tiny-gpt2"""
__snake_case: Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: str = PyTorchBenchmark(A )
__snake_case: List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Any ):
__snake_case: str = """sgugger/tiny-distilbert-classification"""
__snake_case: List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , only_pretrain_model=A , )
__snake_case: Optional[Any] = PyTorchBenchmark(A )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Dict = """sshleifer/tiny-gpt2"""
__snake_case: List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , torchscript=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: Tuple = PyTorchBenchmark(A )
__snake_case: Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: str = """sshleifer/tiny-gpt2"""
__snake_case: Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , fpaa=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: Union[str, Any] = PyTorchBenchmark(A )
__snake_case: Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[str] = """sshleifer/tiny-gpt2"""
__snake_case: List[Any] = AutoConfig.from_pretrained(A )
# set architectures equal to `None`
__snake_case: Union[str, Any] = None
__snake_case: Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: List[Any] = PyTorchBenchmark(A , configs=[config] )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: str = """sshleifer/tiny-gpt2"""
__snake_case: int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: str = PyTorchBenchmark(A )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: List[str] = """sshleifer/tiny-gpt2"""
__snake_case: List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A , multi_process=A , )
__snake_case: int = PyTorchBenchmark(A )
__snake_case: List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self : int ):
__snake_case: Optional[int] = """sshleifer/tiny-gpt2"""
__snake_case: Tuple = AutoConfig.from_pretrained(A )
__snake_case: Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: List[Any] = PyTorchBenchmark(A , configs=[config] )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Tuple = """sshleifer/tinier_bart"""
__snake_case: Optional[int] = AutoConfig.from_pretrained(A )
__snake_case: List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: Union[str, Any] = PyTorchBenchmark(A , configs=[config] )
__snake_case: Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Tuple = """sshleifer/tiny-gpt2"""
__snake_case: Union[str, Any] = AutoConfig.from_pretrained(A )
__snake_case: List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: int = PyTorchBenchmark(A , configs=[config] )
__snake_case: List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Tuple = """sshleifer/tinier_bart"""
__snake_case: List[Any] = AutoConfig.from_pretrained(A )
__snake_case: Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
__snake_case: Optional[Any] = PyTorchBenchmark(A , configs=[config] )
__snake_case: int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Optional[int] = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case: Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , save_to_csv=A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(A , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(A , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(A , """train_time.csv""" ) , env_info_csv_file=os.path.join(A , """env.csv""" ) , multi_process=A , )
__snake_case: Dict = PyTorchBenchmark(A )
benchmark.run()
self.assertTrue(Path(os.path.join(A , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A , """env.csv""" ) ).exists() )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: int = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(A : List[str] ):
self.assertTrue(hasattr(A , """sequential""" ) )
self.assertTrue(hasattr(A , """cumulative""" ) )
self.assertTrue(hasattr(A , """current""" ) )
self.assertTrue(hasattr(A , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case: Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A , """log.txt""" ) , log_print=A , trace_memory_line_by_line=A , multi_process=A , )
__snake_case: List[Any] = PyTorchBenchmark(A )
__snake_case: List[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(A , """log.txt""" ) ).exists() )
| 371
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 293
| 0
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
__a : int = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
__a : int = True if 'large' in model_name or 'huge' in model_name else False
__a : Dict = True if 'large' in model_name or 'huge' in model_name else False
__a : int = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__a : Any = [3, 3, 3, 3]
__a : Optional[Any] = [5, 5, 5, 5]
elif "fl4" in model_name:
__a : Dict = [4, 4, 4, 4]
__a : Optional[Any] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__a : List[str] = [3, 3, 3, 3]
if "lrf" in model_name:
__a : Dict = [3, 3, 3, 3]
else:
__a : Tuple = [2, 2, 2, 2]
if "tiny" in model_name:
__a : int = 96
elif "small" in model_name:
__a : Union[str, Any] = 96
elif "base" in model_name:
__a : List[str] = 128
elif "large" in model_name:
__a : Tuple = 192
elif "xlarge" in model_name:
__a : Union[str, Any] = 256
elif "huge" in model_name:
__a : str = 352
# set label information
__a : Optional[int] = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
__a : Any = 'imagenet-22k-id2label.json'
else:
__a : Optional[int] = 'imagenet-1k-id2label.json'
__a : List[Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
__a : Any = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__a : int = {v: k for k, v in idalabel.items()}
__a : str = FocalNetConfig(
embed_dim=_SCREAMING_SNAKE_CASE , depths=_SCREAMING_SNAKE_CASE , focal_levels=_SCREAMING_SNAKE_CASE , focal_windows=_SCREAMING_SNAKE_CASE , use_conv_embed=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , labelaid=_SCREAMING_SNAKE_CASE , use_post_layernorm=_SCREAMING_SNAKE_CASE , use_layerscale=_SCREAMING_SNAKE_CASE , )
return config
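# Illustration (the variable names above are obfuscated): a name such as
# "focalnet-tiny" selects depths=[2, 2, 6, 2], embed_dim=96 and the ImageNet-1k
# label file, while "large"/"huge" variants enable conv embeddings,
# post-layernorm and layerscale and switch to the ImageNet-22k label file.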
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ):
if "patch_embed.proj" in name:
__a : Tuple = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__a : Any = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
__a : Dict = 'encoder.' + name
if "encoder.layers" in name:
__a : Optional[Any] = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
__a : Union[str, Any] = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
__a : Optional[int] = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__a : List[str] = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__a : Union[str, Any] = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__a : Optional[int] = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
__a : Any = 'layernorm.weight'
if name == "norm.bias":
__a : List[Any] = 'layernorm.bias'
if "head" in name:
__a : List[str] = name.replace('head' , 'classifier' )
else:
__a : List[str] = 'focalnet.' + name
return name
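# Example mapping (sketch): "patch_embed.proj.weight" becomes
# "focalnet.embeddings.patch_embeddings.projection.weight", while classifier
# weights ("head.*") are renamed without the "focalnet." prefix.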
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple=False ):
# fmt: off
__a : int = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
__a : int = model_name_to_url[model_name]
print('Checkpoint URL: ' , _SCREAMING_SNAKE_CASE )
__a : List[str] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
__a : Optional[Any] = state_dict.pop(_SCREAMING_SNAKE_CASE )
__a : Optional[Any] = val
__a : Optional[Any] = get_focalnet_config(_SCREAMING_SNAKE_CASE )
__a : Any = FocalNetForImageClassification(_SCREAMING_SNAKE_CASE )
model.eval()
# load state dict
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# verify conversion
__a : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__a : Optional[Any] = BitImageProcessor(
do_resize=_SCREAMING_SNAKE_CASE , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_SCREAMING_SNAKE_CASE , crop_size=224 , do_normalize=_SCREAMING_SNAKE_CASE , image_mean=_SCREAMING_SNAKE_CASE , image_std=_SCREAMING_SNAKE_CASE , )
__a : List[Any] = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
__a : List[str] = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
__a : List[Any] = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
__a : int = image_transforms(_SCREAMING_SNAKE_CASE ).unsqueeze(0 )
# verify pixel_values
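# The processor output should match the explicit torchvision pipeline below,
# since both resize the short edge to 256, center-crop to 224 and normalize
# with the ImageNet mean/std.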
assert torch.allclose(inputs.pixel_values , _SCREAMING_SNAKE_CASE , atol=1e-4 )
__a : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE )
__a : int = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__a : Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
__a : Optional[int] = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
__a : Optional[Any] = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
__a : List[str] = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
__a : Tuple = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
__a : Optional[Any] = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
__lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
__lowercase : Tuple = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 294
|
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowercase : List[Any] = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "ernie_m"
A_ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __a = 25_0002 , __a = 768 , __a = 12 , __a = 12 , __a = 3072 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 514 , __a = 0.02 , __a = 1 , __a = 1E-0_5 , __a=None , __a=False , __a=0.0 , **__a , ):
'''simple docstring'''
super().__init__(pad_token_id=__a , **__a )
__a : int = vocab_size
__a : Dict = hidden_size
__a : str = num_hidden_layers
__a : Dict = num_attention_heads
__a : List[str] = intermediate_size
__a : Union[str, Any] = hidden_act
__a : List[Any] = hidden_dropout_prob
__a : str = attention_probs_dropout_prob
__a : Any = max_position_embeddings
__a : int = initializer_range
__a : Dict = layer_norm_eps
__a : int = classifier_dropout
__a : Dict = is_decoder
__a : int = act_dropout
| 294
| 1
|
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
__a : int = int(number**0.5 )
return number == sq * sq
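# Example: is_sq(25) -> True (5 * 5); is_sq(26) -> False, since int(26**0.5) == 5.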
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
__a : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
__a : int = x_den * y_den * z_den
__a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
top //= hcf
bottom //= hcf
return top, bottom
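# Example: add_three(1, 2, 1, 3, 1, 6) sums 1/2 + 1/3 + 1/6 and returns the
# reduced fraction (1, 1).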
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = 35 ):
__a : set = set()
__a : int
__a : Fraction = Fraction(0 )
__a : tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
__a : str = x_num * y_den + x_den * y_num
__a : List[str] = x_den * y_den
__a : Tuple = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__a : List[Any] = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
# n=2
__a : Optional[int] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
__a : Optional[int] = x_den * x_den * y_den * y_den
if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
__a : int = int(sqrt(_SCREAMING_SNAKE_CASE ) )
__a : Any = int(sqrt(_SCREAMING_SNAKE_CASE ) )
__a : List[str] = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__a : Union[str, Any] = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
# n=-1
__a : Optional[int] = x_num * y_num
__a : int = x_den * y_num + x_num * y_den
__a : Optional[int] = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__a : List[str] = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
# n=2
__a : int = x_num * x_num * y_num * y_num
__a : Any = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
__a : List[Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) )
__a : Any = int(sqrt(_SCREAMING_SNAKE_CASE ) )
__a : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__a : Union[str, Any] = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
for num, den in unique_s:
total += Fraction(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 294
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __UpperCamelCase ( nn.Module ):
def __init__( self , __a , __a ):
'''simple docstring'''
super().__init__()
__a : int = module
__a : List[Any] = nn.Sequential(
nn.Linear(module.in_features , __a , bias=__a ) , nn.Linear(__a , module.out_features , bias=__a ) , )
__a : int = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=__a )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def __UpperCAmelCase ( self , __a , *__a , **__a ):
'''simple docstring'''
return self.module(__a , *__a , **__a ) + self.adapter(__a )
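# Sketch of intended usage (the test further below refers to this class as
# LoRALayer): wrapping a frozen nn.Linear adds a trainable low-rank residual on
# top of the frozen base projection, e.g.
#   lora = LoRALayer(nn.Linear(768, 768), rank=16); out = lora(torch.randn(1, 768))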
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside the setUp function
# We need to test on relatively large models (aka >1b parameters), otherwise the quantization may not work as expected
# Therefore here we use only bloom-1b3 to test our module
A_ = "bigscience/bloom-1b7"
# Constant values
A_ = 2.109659552692574
A_ = "Hello my name is"
A_ = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
A_ = 10
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Models and tokenizer
__a : int = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.model_abit.config
self.assertTrue(hasattr(__a , 'quantization_config' ) )
__a : Union[str, Any] = config.to_dict()
__a : Tuple = config.to_diff_dict()
__a : Tuple = config.to_json_string()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
__a : List[Any] = self.model_fpaa.get_memory_footprint()
__a : List[Any] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__a : Tuple = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __UpperCAmelCase ( self ):
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__a , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='pt' )
__a : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = BitsAndBytesConfig()
__a : Tuple = True
__a : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__a , device_map='auto' )
__a : str = self.tokenizer(self.input_text , return_tensors='pt' )
__a : List[Any] = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
def __UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(__a ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = BitsAndBytesConfig()
with self.assertRaises(__a ):
__a : List[str] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__a , load_in_abit=__a , device_map='auto' , bnb_abit_quant_type='nf4' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(__a ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(__a ):
# Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(__a ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(__a ):
# Tries with `float()`
self.model_abit.float()
with self.assertRaises(__a ):
# Tries with `half()`
self.model_abit.half()
# Test if we did not break anything
__a : List[str] = self.tokenizer(self.input_text , return_tensors='pt' )
__a : Optional[int] = self.model_fpaa.to(torch.floataa )
__a : Tuple = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__a : List[Any] = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__a : Union[str, Any] = self.model_fpaa.half()
# Check this does not throw an error
__a : Union[str, Any] = self.model_fpaa.float()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=__a , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
@classmethod
def __UpperCAmelCase ( cls ):
'''simple docstring'''
__a : Any = 't5-small'
__a : Tuple = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__a : int = AutoTokenizer.from_pretrained(cls.model_name )
__a : Union[str, Any] = 'Translate in German: Hello, my dog is cute'
def __UpperCAmelCase ( self ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from transformers import TaForConditionalGeneration
__a : Optional[int] = TaForConditionalGeneration._keep_in_fpaa_modules
__a : List[str] = None
# test with `t5-small`
__a : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
__a : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : Any = model.generate(**__a )
# test with `flan-t5-small`
__a : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__a , device_map='auto' )
__a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : List[Any] = model.generate(**__a )
__a : Optional[int] = modules
def __UpperCAmelCase ( self ):
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__a : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : List[str] = model.generate(**__a )
# test with `flan-t5-small`
__a : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__a , device_map='auto' )
__a : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : int = model.generate(**__a )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# model_name
__a : List[Any] = 'bigscience/bloom-560m'
__a : Union[str, Any] = 't5-small'
# Different types of model
__a : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
# Sequence classification model
__a : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=__a , device_map='auto' )
# CausalLM model
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
# Seq2seq model
__a : Any = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=__a , device_map='auto' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
def __UpperCAmelCase ( self ):
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__a : str = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=__a , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__a : List[Any] = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__a : str = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 'facebook/opt-350m'
super().setUp()
def __UpperCAmelCase ( self ):
'''simple docstring'''
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__a : Tuple = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__a : Tuple = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(__a ) ):
__a : str = LoRALayer(module.q_proj , rank=16 )
__a : str = LoRALayer(module.k_proj , rank=16 )
__a : Optional[int] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__a : List[str] = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__a : int = model.forward(**__a )
out.logits.norm().backward()
for module in model.modules():
if isinstance(__a , __a ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(__a , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "gpt2-xl"
A_ = 3.3191854854152187
| 294
| 1
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : Tuple = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
for attribute in key.split('.' ):
__a : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
__a : Any = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
__a : int = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__a : Union[str, Any] = value
elif weight_type == "weight_g":
__a : str = value
elif weight_type == "weight_v":
__a : Any = value
elif weight_type == "bias":
__a : int = value
else:
__a : Optional[int] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int ):
__a : Dict = []
__a : Any = fairseq_model.state_dict()
__a : Union[str, Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__a : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
__a : List[str] = True
else:
for key, mapped_key in MAPPING.items():
__a : Optional[Any] = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__a : Dict = True
if "*" in mapped_key:
__a : Tuple = name.split(_SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
__a : Optional[int] = mapped_key.replace('*' , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
__a : Union[str, Any] = 'weight_g'
elif "weight_v" in name:
__a : Dict = 'weight_v'
elif "weight" in name:
__a : Union[str, Any] = 'weight'
elif "bias" in name:
__a : Optional[int] = 'bias'
else:
__a : int = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] ):
__a : Union[str, Any] = full_name.split('conv_layers.' )[-1]
__a : Dict = name.split('.' )
__a : Optional[Any] = int(items[0] )
__a : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__a : List[str] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__a : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__a : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__a : Optional[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int ):
__a : Optional[Any] = SEWConfig()
if is_finetuned:
__a : Optional[int] = model.wav_encoder.wav_model.cfg
else:
__a : List[str] = model.cfg
__a : Any = fs_config.conv_bias
__a : Optional[Any] = eval(fs_config.conv_feature_layers )
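# fairseq serializes conv_feature_layers as a literal string such as
# "[(512, 10, 5), (512, 3, 2), ...]"; eval() recovers the list of
# (dim, kernel_size, stride) tuples that is unpacked below.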
__a : int = [x[0] for x in conv_layers]
__a : str = [x[1] for x in conv_layers]
__a : Optional[Any] = [x[2] for x in conv_layers]
__a : Optional[int] = 'gelu'
__a : List[Any] = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
__a : Optional[Any] = 0.0
__a : List[Any] = fs_config.activation_fn.name
__a : Any = fs_config.encoder_embed_dim
__a : Tuple = 0.0_2
__a : Optional[Any] = fs_config.encoder_ffn_embed_dim
__a : Tuple = 1e-5
__a : int = fs_config.encoder_layerdrop
__a : List[str] = fs_config.encoder_attention_heads
__a : Optional[int] = fs_config.conv_pos_groups
__a : Optional[Any] = fs_config.conv_pos
__a : List[Any] = len(_SCREAMING_SNAKE_CASE )
__a : Optional[int] = fs_config.encoder_layers
__a : Optional[Any] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
__a : Tuple = model.cfg
__a : List[Any] = fs_config.final_dropout
__a : Any = fs_config.layerdrop
__a : List[str] = fs_config.activation_dropout
__a : List[str] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
__a : Any = fs_config.attention_dropout
__a : Any = fs_config.dropout_input
__a : Tuple = fs_config.dropout
__a : List[Any] = fs_config.mask_channel_length
__a : Union[str, Any] = fs_config.mask_channel_prob
__a : List[Any] = fs_config.mask_length
__a : Dict = fs_config.mask_prob
__a : str = 'Wav2Vec2FeatureExtractor'
__a : Any = 'Wav2Vec2CTCTokenizer'
return config
@torch.no_grad()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : int=None , _SCREAMING_SNAKE_CASE : str=True ):
if is_finetuned:
__a , __a , __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__a , __a , __a : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
__a : Dict = SEWConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
__a : List[str] = convert_config(model[0] , _SCREAMING_SNAKE_CASE )
__a : int = model[0].eval()
__a : List[str] = True if config.feat_extract_norm == 'layer' else False
__a : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
if is_finetuned:
if dict_path:
__a : Any = Dictionary.load(_SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__a : List[str] = target_dict.pad_index
__a : Optional[Any] = target_dict.bos_index
__a : Tuple = target_dict.pad_index
__a : Dict = target_dict.bos_index
__a : str = target_dict.eos_index
__a : List[Any] = len(target_dict.symbols )
__a : Tuple = os.path.join(_SCREAMING_SNAKE_CASE , 'vocab.json' )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_SCREAMING_SNAKE_CASE ) )
return
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , _SCREAMING_SNAKE_CASE )
__a : Optional[int] = WavaVecaCTCTokenizer(
_SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_SCREAMING_SNAKE_CASE , )
__a : Optional[int] = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
__a : Any = SEWForCTC(_SCREAMING_SNAKE_CASE )
else:
__a : str = SEWModel(_SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowercase : str = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__lowercase : str = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 294
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = None
A_ = None
A_ = None
A_ = None
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a=1 , __a=0 , __a=2 , __a=512 , __a="cls" , __a=False , __a=True , **__a , ):
'''simple docstring'''
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__a : Any = project_dim
__a : Optional[Any] = pooler_fn
__a : int = learn_encoder
__a : str = use_attention_mask
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = [r"pooler", r"logit_scale"]
A_ = [r"position_ids", r"predictions.decoder.bias"]
A_ = "roberta"
A_ = RobertaSeriesConfig
def __init__( self , __a ):
'''simple docstring'''
super().__init__(__a )
__a : Optional[Any] = XLMRobertaModel(__a )
__a : str = nn.Linear(config.hidden_size , config.project_dim )
__a : Optional[int] = getattr(__a , 'has_pre_transformation' , __a )
if self.has_pre_transformation:
__a : int = nn.Linear(config.hidden_size , config.project_dim )
__a : List[str] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , ):
'''simple docstring'''
__a : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__a : Tuple = self.base_model(
input_ids=__a , attention_mask=__a , token_type_ids=__a , position_ids=__a , head_mask=__a , inputs_embeds=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_attentions=__a , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__a , )
if self.has_pre_transformation:
__a : Optional[Any] = outputs['hidden_states'][-2]
__a : Optional[int] = self.pre_LN(__a )
__a : Union[str, Any] = self.transformation_pre(__a )
return TransformationModelOutput(
projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
__a : Optional[Any] = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 294
| 1
|
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
__lowercase : List[Any] = True
from torch.cuda.amp import autocast
__lowercase : List[Any] = logging.getLogger(__name__)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Dict=None ):
return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE )
@dataclass
class __UpperCamelCase :
A_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
A_ = field(
default=lowerCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
A_ = field(
default=lowerCAmelCase_ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
A_ = field(
default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} )
A_ = field(
default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
A_ = field(
default=0.1 , metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
} , )
A_ = field(
default=0.1 , metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."} , )
A_ = field(
default=0.05 , metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
} , )
A_ = field(default=0.0 , metadata={"help": "The LayerDrop probability."} )
@dataclass
class __UpperCamelCase :
A_ = field(
default=lowerCAmelCase_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
A_ = field(
default="train+validation" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
A_ = field(
default=lowerCAmelCase_ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
A_ = field(
default=lowerCAmelCase_ , metadata={"help": "The number of processes to use for the preprocessing."} , )
A_ = field(
default=lowerCAmelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
A_ = field(
default=lowerCAmelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
} , )
A_ = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"] , metadata={"help": "A list of characters to remove from the transcripts."} , )
@dataclass
class __UpperCamelCase :
A_ = 42
A_ = True
A_ = None
A_ = None
A_ = None
A_ = None
def __call__( self , __a ):
'''simple docstring'''
__a : List[Any] = [{'input_values': feature['input_values']} for feature in features]
__a : Optional[int] = [{'input_ids': feature['labels']} for feature in features]
__a : Any = self.processor.pad(
__a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
__a : List[str] = self.processor.pad(
labels=__a , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , )
# replace padding with -100 so the loss ignores the padded positions
__a : Union[str, Any] = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
__a : int = labels
return batch
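# The -100 fill above follows the standard PyTorch convention: negative label
# ids are treated as padding by the model's loss computation, so padded
# positions contribute no gradient.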
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
model.train()
__a : int = self._prepare_inputs(__a )
if self.use_amp:
with autocast():
__a : Optional[int] = self.compute_loss(__a , __a )
else:
__a : Optional[Any] = self.compute_loss(__a , __a )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
__a : int = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__a : Any = loss.sum() / (inputs['labels'] >= 0).sum()
else:
raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
__a : List[str] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__a ).backward()
elif self.use_apex:
with amp.scale_loss(__a , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__a )
else:
loss.backward()
return loss.detach()
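# training_step above mirrors the stock Trainer.training_step: on multi-GPU runs
# it reduces the per-device CTC losses per the model config ("mean" or "sum"),
# rescales for gradient accumulation, and routes backward() through AMP, apex or
# DeepSpeed depending on the setup.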
def lowerCamelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__a : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__a , __a , __a : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__a , __a , __a : Any = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__a : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__a : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
__a : Optional[Any] = datasets.load_dataset(
'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name )
__a : Dict = datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' )
# Create and save tokenizer
__a : str = F"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(_SCREAMING_SNAKE_CASE : Optional[int] ):
__a : Any = re.sub(_SCREAMING_SNAKE_CASE , '' , batch['sentence'] ).lower() + ' '
return batch
__a : List[str] = train_dataset.map(_SCREAMING_SNAKE_CASE , remove_columns=['sentence'] )
__a : Any = eval_dataset.map(_SCREAMING_SNAKE_CASE , remove_columns=['sentence'] )
def extract_all_chars(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
__a : int = ' '.join(batch['text'] )
__a : int = list(set(_SCREAMING_SNAKE_CASE ) )
return {"vocab": [vocab], "all_text": [all_text]}
__a : List[str] = train_dataset.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , batch_size=-1 , keep_in_memory=_SCREAMING_SNAKE_CASE , remove_columns=train_dataset.column_names , )
__a : Dict = train_dataset.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , batch_size=-1 , keep_in_memory=_SCREAMING_SNAKE_CASE , remove_columns=eval_dataset.column_names , )
__a : Dict = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
__a : List[str] = {v: k for k, v in enumerate(_SCREAMING_SNAKE_CASE )}
__a : List[Any] = vocab_dict[' ']
del vocab_dict[" "]
__a : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
__a : List[Any] = len(_SCREAMING_SNAKE_CASE )
with open('vocab.json' , 'w' ) as vocab_file:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
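# vocab_dict maps every character seen in the corpus to an integer id. The
# obfuscated assignments above appear to correspond, in the original script, to
# remapping the space character to the word-delimiter token "|" and reserving
# two further ids (for [UNK] and [PAD], which the tokenizer below expects).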
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__a : Optional[int] = WavaVecaCTCTokenizer(
'vocab.json' , unk_token='[UNK]' , pad_token='[PAD]' , word_delimiter_token='|' , )
__a : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0.0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE )
__a : Union[str, Any] = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
__a : List[str] = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='mean' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
__a : Dict = min(len(_SCREAMING_SNAKE_CASE ) , data_args.max_train_samples )
__a : Optional[Any] = train_dataset.select(range(_SCREAMING_SNAKE_CASE ) )
if data_args.max_val_samples is not None:
__a : Optional[Any] = eval_dataset.select(range(data_args.max_val_samples ) )
__a : Any = torchaudio.transforms.Resample(48_000 , 16_000 )
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(_SCREAMING_SNAKE_CASE : Dict ):
__a , __a : Union[str, Any] = torchaudio.load(batch['path'] )
__a : List[Any] = resampler(_SCREAMING_SNAKE_CASE ).squeeze().numpy()
__a : Tuple = 16_000
__a : Union[str, Any] = batch['text']
return batch
__a : int = train_dataset.map(
_SCREAMING_SNAKE_CASE , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
__a : Optional[Any] = eval_dataset.map(
_SCREAMING_SNAKE_CASE , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(_SCREAMING_SNAKE_CASE : Optional[int] ):
# check that all files have the correct sampling rate
assert (
len(set(batch['sampling_rate'] ) ) == 1
), F"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
__a : List[Any] = processor(
audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] )
batch.update(_SCREAMING_SNAKE_CASE )
return batch
__a : Optional[int] = train_dataset.map(
_SCREAMING_SNAKE_CASE , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_SCREAMING_SNAKE_CASE , num_proc=data_args.preprocessing_num_workers , )
__a : Tuple = eval_dataset.map(
_SCREAMING_SNAKE_CASE , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_SCREAMING_SNAKE_CASE , num_proc=data_args.preprocessing_num_workers , )
# Metric
__a : Tuple = datasets.load_metric('wer' )
def compute_metrics(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
__a : Union[str, Any] = pred.predictions
__a : Any = np.argmax(_SCREAMING_SNAKE_CASE , axis=-1 )
__a : Any = processor.tokenizer.pad_token_id
__a : str = processor.batch_decode(_SCREAMING_SNAKE_CASE )
# we do not want to group tokens when computing the metrics
__a : Optional[Any] = processor.batch_decode(pred.label_ids , group_tokens=_SCREAMING_SNAKE_CASE )
__a : List[str] = wer_metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
__a : str = DataCollatorCTCWithPadding(processor=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE )
# Initialize our Trainer
__a : List[Any] = CTCTrainer(
model=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , compute_metrics=_SCREAMING_SNAKE_CASE , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__a : List[Any] = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
__a : int = model_args.model_name_or_path
else:
__a : Union[str, Any] = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
__a : Union[str, Any] = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE )
trainer.save_model()
__a : Any = train_result.metrics
__a : List[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_SCREAMING_SNAKE_CASE )
)
__a : List[Any] = min(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) )
trainer.log_metrics('train' , _SCREAMING_SNAKE_CASE )
trainer.save_metrics('train' , _SCREAMING_SNAKE_CASE )
trainer.save_state()
# Evaluation
__a : List[str] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__a : Union[str, Any] = trainer.evaluate()
__a : Any = data_args.max_val_samples if data_args.max_val_samples is not None else len(_SCREAMING_SNAKE_CASE )
__a : List[str] = min(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) )
trainer.log_metrics('eval' , _SCREAMING_SNAKE_CASE )
trainer.save_metrics('eval' , _SCREAMING_SNAKE_CASE )
return results
if __name__ == "__main__":
main()
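# A minimal sketch of the evaluation logic in compute_metrics above: greedy
# (argmax) decoding followed by word error rate. The helper below is purely
# illustrative and hypothetical -- the script itself relies on
# processor.batch_decode and datasets.load_metric('wer'); toy_wer just computes
# WER as word-level edit distance divided by the reference length.
def toy_wer(prediction: str, reference: str) -> float:
    ref, hyp = reference.split(), prediction.split()
    # classic dynamic-programming edit distance over words
    dist = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dist[i][0] = i
    for j in range(len(hyp) + 1):
        dist[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dist[i][j] = min(dist[i - 1][j] + 1,         # deletion
                             dist[i][j - 1] + 1,         # insertion
                             dist[i - 1][j - 1] + cost)  # substitution
    return dist[len(ref)][len(hyp)] / max(len(ref), 1)
# e.g. toy_wer('hello word', 'hello world') == 0.5 (one substitution, two reference words)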
| 294
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase : Union[str, Any] = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass  # nothing to register here: RoCBert does not provide a fast tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass  # nothing to import here; raising would fail whenever tokenizers *is* available
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase : Optional[Any] = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Dict = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__lowercase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__lowercase : str = logging.get_logger(__name__)
# General docstring
__lowercase : List[str] = 'MobileNetV1Config'
# Base docstring
__lowercase : Tuple = 'google/mobilenet_v1_1.0_224'
__lowercase : List[Any] = [1, 10_24, 7, 7]
# Image classification docstring
__lowercase : int = 'google/mobilenet_v1_1.0_224'
__lowercase : Any = 'tabby, tabby cat'
__lowercase : Dict = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any]=None ):
__a : Dict = {}
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Optional[Any] = model.mobilenet_va
else:
__a : List[Any] = model
__a : Dict = 'MobilenetV1/Conv2d_0/'
__a : Dict = backbone.conv_stem.convolution.weight
__a : Optional[Any] = backbone.conv_stem.normalization.bias
__a : int = backbone.conv_stem.normalization.weight
__a : int = backbone.conv_stem.normalization.running_mean
__a : Tuple = backbone.conv_stem.normalization.running_var
for i in range(13 ):
__a : int = i + 1
__a : Dict = i * 2
__a : Dict = backbone.layer[pt_index]
__a : Dict = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
__a : Union[str, Any] = pointer.convolution.weight
__a : Optional[Any] = pointer.normalization.bias
__a : Union[str, Any] = pointer.normalization.weight
__a : List[Any] = pointer.normalization.running_mean
__a : Tuple = pointer.normalization.running_var
__a : List[str] = backbone.layer[pt_index + 1]
__a : Optional[Any] = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
__a : Optional[int] = pointer.convolution.weight
__a : List[str] = pointer.normalization.bias
__a : Dict = pointer.normalization.weight
__a : Dict = pointer.normalization.running_mean
__a : Optional[int] = pointer.normalization.running_var
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Any = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
__a : Optional[int] = model.classifier.weight
__a : List[Any] = model.classifier.bias
return tf_to_pt_map
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.' )
raise
# Load weights from TF model
__a : Union[str, Any] = tf.train.list_variables(_SCREAMING_SNAKE_CASE )
__a : Optional[int] = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
__a : List[str] = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Optional[Any] = array
# Build TF to PyTorch weights loading map
__a : Optional[int] = _build_tf_to_pytorch_map(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
__a : Union[str, Any] = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise' )
__a : Optional[Any] = np.transpose(_SCREAMING_SNAKE_CASE , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('Transposing' )
if len(pointer.shape ) == 2: # copying into linear layer
__a : Union[str, Any] = array.squeeze().transpose()
else:
__a : Dict = np.transpose(_SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
__a : List[str] = torch.from_numpy(_SCREAMING_SNAKE_CASE )
tf_weights.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
tf_weights.pop(name + '/RMSProp' , _SCREAMING_SNAKE_CASE )
tf_weights.pop(name + '/RMSProp_1' , _SCREAMING_SNAKE_CASE )
tf_weights.pop(name + '/ExponentialMovingAverage' , _SCREAMING_SNAKE_CASE )
logger.info(F"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" )
return model
def lowerCamelCase (_SCREAMING_SNAKE_CASE : torch.Tensor , _SCREAMING_SNAKE_CASE : nn.Convad ):
__a , __a : Any = features.shape[-2:]
__a , __a : int = conv_layer.stride
__a , __a : Any = conv_layer.kernel_size
if in_height % stride_height == 0:
__a : int = max(kernel_height - stride_height , 0 )
else:
__a : int = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
__a : Any = max(kernel_width - stride_width , 0 )
else:
__a : str = max(kernel_width - (in_width % stride_width) , 0 )
__a : int = pad_along_width // 2
__a : Dict = pad_along_width - pad_left
__a : List[str] = pad_along_height // 2
__a : Union[str, Any] = pad_along_height - pad_top
__a : str = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'constant' , 0.0 )
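# Worked example of the "SAME" padding arithmetic above (a sketch, not part of
# the model): for an input of height 7, stride 2 and kernel 3, 7 % 2 == 1, so
# pad_along_height = max(3 - 1, 0) = 2, giving pad_top = 1 and pad_bottom = 1.
# For height 8 (divisible by the stride), pad_along_height = max(3 - 2, 0) = 1,
# giving pad_top = 0 and pad_bottom = 1 -- TensorFlow puts the extra pixel on
# the bottom/right, which is exactly what this helper reproduces in PyTorch.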
class __UpperCamelCase ( nn.Module ):
def __init__( self , __a , __a , __a , __a , __a = 1 , __a = 1 , __a = False , __a = True , __a = True , ):
'''simple docstring'''
super().__init__()
__a : Optional[int] = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
__a : Dict = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
__a : Union[str, Any] = nn.Convad(
in_channels=__a , out_channels=__a , kernel_size=__a , stride=__a , padding=__a , groups=__a , bias=__a , padding_mode='zeros' , )
if use_normalization:
__a : List[str] = nn.BatchNormad(
num_features=__a , eps=config.layer_norm_eps , momentum=0.9997 , affine=__a , track_running_stats=__a , )
else:
__a : Tuple = None
if use_activation:
if isinstance(__a , __a ):
__a : Tuple = ACTaFN[use_activation]
elif isinstance(config.hidden_act , __a ):
__a : Union[str, Any] = ACTaFN[config.hidden_act]
else:
__a : Dict = config.hidden_act
else:
__a : List[Any] = None
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if self.config.tf_padding:
__a : Union[str, Any] = apply_tf_padding(__a , self.convolution )
__a : Union[str, Any] = self.convolution(__a )
if self.normalization is not None:
__a : str = self.normalization(__a )
if self.activation is not None:
__a : Optional[int] = self.activation(__a )
return features
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = MobileNetVaConfig
A_ = load_tf_weights_in_mobilenet_va
A_ = "mobilenet_v1"
A_ = "pixel_values"
A_ = False
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if isinstance(__a , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__a , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
__lowercase : Any = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__lowercase : Optional[int] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , lowerCAmelCase_ , )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a = True ):
'''simple docstring'''
super().__init__(__a )
__a : Optional[int] = config
__a : str = 32
__a : Dict = max(int(depth * config.depth_multiplier ) , config.min_depth )
__a : Union[str, Any] = MobileNetVaConvLayer(
__a , in_channels=config.num_channels , out_channels=__a , kernel_size=3 , stride=2 , )
__a : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
__a : Any = nn.ModuleList()
for i in range(13 ):
__a : Union[str, Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__a : List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
__a , in_channels=__a , out_channels=__a , kernel_size=3 , stride=strides[i] , groups=__a , ) )
self.layer.append(
MobileNetVaConvLayer(
__a , in_channels=__a , out_channels=__a , kernel_size=1 , ) )
__a : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(__a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , ):
'''simple docstring'''
__a : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a : int = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
__a : Union[str, Any] = self.conv_stem(__a )
__a : Any = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
__a : List[str] = layer_module(__a )
if output_hidden_states:
__a : List[Any] = all_hidden_states + (hidden_states,)
__a : str = hidden_states
if self.pooler is not None:
__a : Union[str, Any] = torch.flatten(self.pooler(__a ) , start_dim=1 )
else:
__a : int = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__a , pooler_output=__a , hidden_states=__a , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCAmelCase_ , )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a ):
'''simple docstring'''
super().__init__(__a )
__a : Tuple = config.num_labels
__a : Tuple = MobileNetVaModel(__a )
__a : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__a : Any = nn.Dropout(config.classifier_dropout_prob , inplace=__a )
__a : Any = nn.Linear(__a , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , ):
'''simple docstring'''
__a : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
__a : Dict = self.mobilenet_va(__a , output_hidden_states=__a , return_dict=__a )
__a : List[str] = outputs.pooler_output if return_dict else outputs[1]
__a : int = self.classifier(self.dropout(__a ) )
__a : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__a : str = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__a : int = 'single_label_classification'
else:
__a : Optional[Any] = 'multi_label_classification'
if self.config.problem_type == "regression":
__a : Optional[Any] = MSELoss()
if self.num_labels == 1:
__a : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__a : Any = loss_fct(__a , __a )
elif self.config.problem_type == "single_label_classification":
__a : List[str] = CrossEntropyLoss()
__a : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__a : Tuple = BCEWithLogitsLoss()
__a : Optional[int] = loss_fct(__a , __a )
if not return_dict:
__a : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=__a , logits=__a , hidden_states=outputs.hidden_states , )
| 294
| 1
|
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowercase : List[str] = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class __UpperCamelCase ( unittest.TestCase , lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = load_tool('text-question-answering' )
self.tool.setup()
__a : List[Any] = load_tool('text-question-answering' , remote=__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.tool(__a , 'What did Hugging Face do in April 2021?' )
self.assertEqual(__a , 'launched the BigScience Research Workshop' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.remote_tool(__a , 'What did Hugging Face do in April 2021?' )
self.assertEqual(__a , 'launched the BigScience Research Workshop' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.tool(text=__a , question='What did Hugging Face do in April 2021?' )
self.assertEqual(__a , 'launched the BigScience Research Workshop' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.remote_tool(text=__a , question='What did Hugging Face do in April 2021?' )
self.assertEqual(__a , 'launched the BigScience Research Workshop' )
| 294
|
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
__lowercase : str = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "dhaka" , _SCREAMING_SNAKE_CASE : int = 5 ):
__a : Optional[Any] = min(_SCREAMING_SNAKE_CASE , 50 ) # Prevent abuse!
__a : Optional[Any] = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
__a : Tuple = requests.get('https://www.google.com/search' , params=_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE )
__a : Dict = BeautifulSoup(html.text , 'html.parser' )
__a : List[str] = ''.join(
re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
__a : Optional[Any] = json.dumps(_SCREAMING_SNAKE_CASE )
__a : List[str] = json.loads(_SCREAMING_SNAKE_CASE )
__a : List[Any] = re.findall(
r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , _SCREAMING_SNAKE_CASE , )
if not matched_google_image_data:
return 0
__a : Tuple = re.sub(
r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(_SCREAMING_SNAKE_CASE ) , )
__a : Optional[Any] = re.findall(
r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , _SCREAMING_SNAKE_CASE , )
for index, fixed_full_res_image in enumerate(_SCREAMING_SNAKE_CASE ):
if index >= max_images:
return index
__a : List[str] = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
__a : Tuple = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
__a : Dict = urllib.request.build_opener()
__a : Union[str, Any] = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(_SCREAMING_SNAKE_CASE )
__a : List[Any] = F"""query_{query.replace(" " , "_" )}"""
if not os.path.exists(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
urllib.request.urlretrieve( # noqa: S310
_SCREAMING_SNAKE_CASE , F"""{path_name}/original_size_img_{index}.jpg""" )
return index
if __name__ == "__main__":
try:
__lowercase : Optional[int] = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('Please provide a search term.')
raise
| 294
| 1
|
'''simple docstring'''
import os
def lowerCamelCase ():
with open(os.path.dirname(_SCREAMING_SNAKE_CASE ) + '/p022_names.txt' ) as file:
__a : List[Any] = str(file.readlines()[0] )
__a : str = names.replace('"' , '' ).split(',' )
names.sort()
__a : Union[str, Any] = 0
__a : Tuple = 0
for i, name in enumerate(_SCREAMING_SNAKE_CASE ):
for letter in name:
name_score += ord(_SCREAMING_SNAKE_CASE ) - 64
total_score += (i + 1) * name_score
__a : Any = 0
return total_score
if __name__ == "__main__":
print(solution())
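# Worked example from the Project Euler problem statement: COLIN has an
# alphabetical value of 3 + 15 + 12 + 9 + 14 = 53 and is the 938th name in the
# sorted list, so it contributes 938 * 53 = 49714 to the total score.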
| 294
|
'''simple docstring'''
import os
def lowerCamelCase ():
with open(os.path.dirname(_SCREAMING_SNAKE_CASE ) + '/p022_names.txt' ) as file:
__a : List[Any] = str(file.readlines()[0] )
__a : str = names.replace('"' , '' ).split(',' )
names.sort()
__a : Union[str, Any] = 0
__a : Tuple = 0
for i, name in enumerate(_SCREAMING_SNAKE_CASE ):
for letter in name:
name_score += ord(_SCREAMING_SNAKE_CASE ) - 64
total_score += (i + 1) * name_score
__a : Any = 0
return total_score
if __name__ == "__main__":
print(solution())
| 294
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[Any] ):
create_state_space_tree(_SCREAMING_SNAKE_CASE , [] , 0 )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[Any] , _SCREAMING_SNAKE_CASE : list[Any] , _SCREAMING_SNAKE_CASE : int ):
if index == len(_SCREAMING_SNAKE_CASE ):
print(_SCREAMING_SNAKE_CASE )
return
create_state_space_tree(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
__lowercase : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
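# Traced output for the second call (not printed by the module itself): the
# exclude-branch is explored before the include-branch, so for ['A', 'B', 'C']
# the subsequences print in the order
# [], ['C'], ['B'], ['B', 'C'], ['A'], ['A', 'C'], ['A', 'B'], ['A', 'B', 'C']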
| 294
|
'''simple docstring'''
__lowercase : Optional[Any] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
__lowercase : List[str] = ['a', 'b', 'c', 'd', 'e']
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ):
__a : Any = start
# add current to visited
visited.append(_SCREAMING_SNAKE_CASE )
__a : Union[str, Any] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__a : Dict = topological_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# if all neighbors visited add current to sort
sort.append(_SCREAMING_SNAKE_CASE )
# if all vertices haven't been visited select a new one to visit
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
for vertice in vertices:
if vertice not in visited:
__a : List[Any] = topological_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# return sort
return sort
if __name__ == "__main__":
__lowercase : Union[str, Any] = topological_sort('a', [], [])
print(sort)
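# For the hard-coded graph above this prints ['c', 'd', 'e', 'b', 'a']: each
# vertex is appended only after all of its neighbors, so the list comes out
# children-first (reverse topological order). Reversing it with
# list(reversed(sort)) gives ['a', 'b', 'e', 'd', 'c'], a conventional
# topological ordering.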
| 294
| 1
|
'''simple docstring'''
from typing import Any
class __UpperCamelCase :
def __init__( self , __a ):
'''simple docstring'''
__a : int = data
__a : Tuple = None
def __repr__( self ):
'''simple docstring'''
return f"""Node({self.data})"""
class __UpperCamelCase :
def __init__( self ):
'''simple docstring'''
__a : str = None
def __iter__( self ):
'''simple docstring'''
__a : List[str] = self.head
while node:
yield node.data
__a : Any = node.next
def __len__( self ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self ):
'''simple docstring'''
return "->".join([str(__a ) for item in self] )
def __getitem__( self , __a ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , __a , __a ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
__a : Dict = self.head
for _ in range(__a ):
__a : Any = current.next
__a : int = data
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
self.insert_nth(len(self ) , __a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
self.insert_nth(0 , __a )
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
__a : List[str] = Node(__a )
if self.head is None:
__a : Any = new_node
elif index == 0:
__a : Dict = self.head # link new_node to head
__a : Dict = new_node
else:
__a : Union[str, Any] = self.head
for _ in range(index - 1 ):
__a : str = temp.next
__a : Tuple = temp.next
__a : Dict = new_node
def __UpperCAmelCase ( self ): # print every node data
'''simple docstring'''
print(self )
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.delete_nth(0 )
def __UpperCAmelCase ( self ): # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def __UpperCAmelCase ( self , __a = 0 ):
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
__a : int = self.head # default first node
if index == 0:
__a : str = self.head.next
else:
__a : Tuple = self.head
for _ in range(index - 1 ):
__a : Tuple = temp.next
__a : Optional[int] = temp.next
__a : Union[str, Any] = temp.next.next
return delete_node.data
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.head is None
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = None
__a : Any = self.head
while current:
# Store the current node's next node.
__a : Any = current.next
# Make the current node's next point backwards
__a : Dict = prev
# Make the previous node be the current node
__a : Union[str, Any] = current
# Make the current node the next node (to progress iteration)
__a : List[str] = next_node
# Return prev in order to put the head at the end
__a : Union[str, Any] = prev
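# Pointer-reversal trace for the method above (illustrative): for 1 -> 2 -> 3,
# each iteration rewires one `next` pointer backwards:
#   prev=None, current=1   ->   1.next=None, prev=1, current=2
#   prev=1,    current=2   ->   2.next=1,    prev=2, current=3
#   prev=2,    current=3   ->   3.next=2,    prev=3, current=None
# and the head finally becomes prev (the old tail), yielding 3 -> 2 -> 1.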
def lowerCamelCase ():
__a : Tuple = LinkedList()
assert linked_list.is_empty() is True
assert str(_SCREAMING_SNAKE_CASE ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_SCREAMING_SNAKE_CASE ) == i
linked_list.insert_nth(_SCREAMING_SNAKE_CASE , i + 1 )
assert str(_SCREAMING_SNAKE_CASE ) == "->".join(str(_SCREAMING_SNAKE_CASE ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_SCREAMING_SNAKE_CASE ) == "->".join(str(_SCREAMING_SNAKE_CASE ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_SCREAMING_SNAKE_CASE ) == 9
assert str(_SCREAMING_SNAKE_CASE ) == "->".join(str(_SCREAMING_SNAKE_CASE ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__a : Optional[int] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_SCREAMING_SNAKE_CASE ) == "->".join(str(_SCREAMING_SNAKE_CASE ) for i in range(-8 , 1 ) )
def lowerCamelCase ():
__a : Union[str, Any] = [
-9,
100,
Node(77_345_112 ),
'dlrow olleH',
7,
5_555,
0,
-1_9_2.5_5_5_5_5,
'Hello, world!',
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
__a : Dict = LinkedList()
for i in test_input:
linked_list.insert_tail(_SCREAMING_SNAKE_CASE )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_SCREAMING_SNAKE_CASE ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__a : Tuple = linked_list.delete_head()
assert result == -9
assert (
str(_SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__a : Any = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(_SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__a : int = linked_list.delete_nth(10 )
assert result is None
assert (
str(_SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_SCREAMING_SNAKE_CASE )
assert (
str(_SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_SCREAMING_SNAKE_CASE )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def lowerCamelCase ():
from doctest import testmod
testmod()
__a : Dict = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(_SCREAMING_SNAKE_CASE )
print('\nReading/changing Node data using indexing:' )
print(F"""Element at Position 1: {linked_list[1]}""" )
__a : Optional[Any] = input('Enter New Value: ' ).strip()
print('New list:' )
print(_SCREAMING_SNAKE_CASE )
print(F"""length of linked_list is : {len(_SCREAMING_SNAKE_CASE )}""" )
if __name__ == "__main__":
main()
| 294
|
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
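# Quick sanity check (illustrative): the expression relies on Python giving
# bitwise & higher precedence than ==, so `number & 1 == 0` parses as
# `(number & 1) == 0`. The least significant bit encodes parity, and Python's
# & on negative integers behaves as expected too:
# 4 & 1 == 0 -> True (even); 7 & 1 == 0 -> False (odd); -2 & 1 == 0 -> True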
| 294
| 1
|
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
__lowercase : Union[str, Any] = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
__lowercase : Dict = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
__lowercase : Optional[Any] = BeautifulSoup(res.text, 'html.parser')
__lowercase : Any = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f'''https://google.com{link.get('href')}''')
| 294
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase : Tuple = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : str = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294
| 1
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : Optional[Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
__lowercase : str = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ):
__a : List[str] = {}
with open(_SCREAMING_SNAKE_CASE , 'r' ) as file:
for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ):
__a : Tuple = line.strip()
if line:
__a : List[str] = line.split()
__a : Tuple = line_number
__a : Tuple = words[0]
__a : List[Any] = value
return result
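# Illustrative input/output for the helper above (hypothetical file contents),
# assuming the obfuscated assignments build a line-number -> first-token
# mapping, as the id2label use further down suggests: a label file containing
# "angry\nhappy\nneutral" is parsed into {0: 'angry', 1: 'happy', 2: 'neutral'}.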
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any] ):
for attribute in key.split('.' ):
__a : Tuple = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Optional[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_SCREAMING_SNAKE_CASE ):
__a : List[Any] = PARAM_MAPPING[full_name.split('.' )[-1]]
__a : int = 'param'
if weight_type is not None and weight_type != "param":
__a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
elif weight_type is not None and weight_type == "param":
__a : Union[str, Any] = hf_pointer
for attribute in hf_param_name.split('.' ):
__a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Optional[int] = shape_pointer.shape
# let's reduce dimension
__a : str = value[0]
else:
__a : str = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__a : Dict = value
elif weight_type == "weight_g":
__a : int = value
elif weight_type == "weight_v":
__a : Tuple = value
elif weight_type == "bias":
__a : Tuple = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
__a : List[str] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Any = value
else:
__a : List[Any] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple ):
__a : Any = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_SCREAMING_SNAKE_CASE ):
__a : List[Any] = PARAM_MAPPING[full_name.split('.' )[-1]]
__a : Any = 'param'
if weight_type is not None and weight_type != "param":
__a : str = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__a : Tuple = '.'.join([key, hf_param_name] )
else:
__a : Optional[Any] = key
__a : str = value if 'lm_head' in full_key else value[0]
__lowercase : Dict = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Optional[Any]=None ):
__a : Dict = False
for key, mapped_key in MAPPING.items():
__a : Optional[int] = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__a : str = True
if "*" in mapped_key:
__a : Optional[Any] = name.split(_SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
__a : List[str] = mapped_key.replace('*' , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
__a : int = 'weight_g'
elif "weight_v" in name:
__a : Tuple = 'weight_v'
elif "bias" in name:
__a : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__a : Optional[Any] = 'weight'
else:
__a : List[Any] = None
if hf_dict is not None:
rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return is_used
return is_used
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any ):
__a : Tuple = []
__a : Any = fairseq_model.state_dict()
__a : str = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__a : List[str] = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
__a : Optional[Any] = True
else:
__a : Dict = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Tuple ):
__a : Dict = full_name.split('conv_layers.' )[-1]
__a : Optional[Any] = name.split('.' )
__a : Optional[int] = int(items[0] )
__a : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__a : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__a : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__a : Tuple = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__a : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : Optional[Any]=True , _SCREAMING_SNAKE_CASE : Any=False ):
if config_path is not None:
__a : Union[str, Any] = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
__a : List[str] = WavaVecaConfig()
if is_seq_class:
__a : List[Any] = read_txt_into_dict(_SCREAMING_SNAKE_CASE )
__a : List[str] = idalabel
__a : int = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE )
__a : Any = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
elif is_finetuned:
if dict_path:
__a : Optional[Any] = Dictionary.load(_SCREAMING_SNAKE_CASE )
# important: change bos & pad token id, since the CTC symbol is <pad> and
# not <s> as in fairseq
__a : Optional[int] = target_dict.pad_index
__a : Dict = target_dict.bos_index
__a : str = target_dict.eos_index
__a : Union[str, Any] = len(target_dict.symbols )
__a : Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'vocab.json' )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_SCREAMING_SNAKE_CASE ) )
return
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
__a : int = target_dict.indices
# fairseq has the <pad> and <s> switched
__a : Tuple = 0
__a : Any = 1
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Optional[int] = WavaVecaCTCTokenizer(
_SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_SCREAMING_SNAKE_CASE , )
__a : Any = True if config.feat_extract_norm == 'layer' else False
__a : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
__a : Dict = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
__a : Any = WavaVecaForCTC(_SCREAMING_SNAKE_CASE )
else:
__a : Any = WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE )
if is_finetuned or is_seq_class:
__a , __a , __a : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__a : Tuple = argparse.Namespace(task='audio_pretraining' )
__a : Optional[Any] = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE )
__a , __a , __a : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE )
__a : Optional[int] = model[0].eval()
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned )
hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
__lowercase : List[Any] = parser.parse_args()
__lowercase : Tuple = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
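# Example invocation (paths and script name are hypothetical; the flags match
# the parser above):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./converted_model \
#       --not_finetuned
# Omitting --not_finetuned (and passing --dict_path) converts a fine-tuned CTC
# model instead; --is_seq_class selects the sequence-classification head.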
| 294
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = 'laion/clap-htsat-unfused'
__a : Optional[Any] = tempfile.mkdtemp()
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return RobertaTokenizer.from_pretrained(self.checkpoint , **__a )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = self.get_tokenizer()
__a : List[str] = self.get_feature_extractor()
__a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a )
processor.save_pretrained(self.tmpdirname )
__a : Tuple = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__a : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__a : List[str] = self.get_feature_extractor(do_normalize=__a , padding_value=1.0 )
__a : Tuple = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.get_feature_extractor()
__a : int = self.get_tokenizer()
__a : str = ClapProcessor(tokenizer=__a , feature_extractor=__a )
__a : int = floats_list((3, 1000) )
__a : str = feature_extractor(__a , return_tensors='np' )
__a : int = processor(audios=__a , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.get_feature_extractor()
__a : Any = self.get_tokenizer()
__a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a )
__a : Union[str, Any] = 'This is a test string'
__a : Union[str, Any] = processor(text=__a )
__a : Tuple = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.get_feature_extractor()
__a : str = self.get_tokenizer()
__a : List[str] = ClapProcessor(tokenizer=__a , feature_extractor=__a )
__a : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a : Optional[int] = processor.batch_decode(__a )
__a : Optional[Any] = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.get_feature_extractor()
__a : Optional[int] = self.get_tokenizer()
__a : int = ClapProcessor(tokenizer=__a , feature_extractor=__a )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 294
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[tf.Tensor, np.ndarray] ):
if isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ):
return list(tensor.shape )
__a : Tuple = tf.shape(_SCREAMING_SNAKE_CASE )
if tensor.shape == tf.TensorShape(_SCREAMING_SNAKE_CASE ):
return dynamic
__a : List[Any] = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(_SCREAMING_SNAKE_CASE )]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : tf.Tensor , _SCREAMING_SNAKE_CASE : Optional[int] = None , _SCREAMING_SNAKE_CASE : Optional[str] = None ):
return tf.nn.softmax(logits=logits + 1e-9 , axis=_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
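# The tiny constant added to the logits leaves the result numerically unchanged
# for practical purposes; it is presumably there to keep the op well-behaved
# under XLA compilation. Sanity check (illustrative): for ordinary float32
# inputs x, tf.reduce_max(tf.abs(<fn above>(x) - tf.nn.softmax(x))) is ~0.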
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.')
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon,
    )
    return outputs
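# Worked numeric sketch (illustrative): with unit weight and zero bias the result
# is just (x - mean) / sqrt(var + eps) along the last axis, so rows come out zero-mean.
_x = tf.constant([[1.0, 2.0, 3.0, 4.0]])
_y = functional_layernorm(_x, tf.ones((4,)), tf.zeros((4,)))
assert abs(float(tf.reduce_mean(_y))) < 1e-6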
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
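# Sketch mirroring torch.flatten semantics (end_dim is inclusive): collapsing
# dims 1..2 of a (2, 3, 4, 5) tensor yields (2, 12, 5).
assert shape_list(flatten(tf.zeros((2, 3, 4, 5)), start_dim=1, end_dim=2)) == [2, 12, 5]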
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
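# Hedged example: a 2D keep-mask becomes an additive bias with 0 at kept positions
# and the dtype minimum (a huge negative number) at masked positions, ready to be
# added to attention scores before softmax.
_bias = invert_attention_mask(tf.constant([[1.0, 1.0, 0.0]]))
assert _bias.shape == (1, 1, 1, 3) and float(_bias[0, 0, 0, 0]) == 0.0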
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"""The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding """
            f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
        ),
    )
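# Usage sketch (illustrative): ids below the vocabulary size pass silently; an
# out-of-range id such as 12 against embed_dim=10 would raise instead.
check_embeddings_within_bounds(tf.constant([[1, 7, 3]]), embed_dim=10)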
def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            'The following attributes cannot be saved to HDF5 file because '
            f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
            f"""bytes: {bad_attributes}""")
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs['%s%d' % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs['%s%d' % (name, chunk_id)]])
            chunk_id += 1
    return data
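# Round-trip sketch (assumes the optional h5py dependency, which this module does
# not import itself; writes a throwaway file in the working directory). Long
# attribute lists are chunked into name0, name1, ... on save and re-joined on load.
try:
    import h5py  # optional dependency, used only for this sketch

    with h5py.File('attrs_demo.h5', 'w') as _f:
        save_attributes_to_hdf5_group(_f, 'layer_names', ['dense_1', 'dense_2'])
        assert load_attributes_from_hdf5_group(_f, 'layer_names') == ['dense_1', 'dense_2']
except ImportError:
    pass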
def expand_1d(data):
    # Give every rank-1 tf.Tensor in the (possibly nested) structure a trailing
    # axis; everything else passes through unchanged.
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
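# Sketch: a flat label vector of shape (8,) becomes (8, 1); non-tensor leaves are untouched.
assert shape_list(expand_1d(tf.zeros((8,)))) == [8, 1]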
| 294
|
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ):
'''simple docstring'''
__a : int = parent
__a : Union[str, Any] = batch_size
__a : Optional[int] = seq_length
__a : List[str] = is_training
__a : Any = use_input_mask
__a : Optional[int] = use_token_type_ids
__a : Any = use_labels
__a : List[str] = vocab_size
__a : str = hidden_size
__a : List[str] = num_hidden_layers
__a : str = num_attention_heads
__a : Optional[int] = intermediate_size
__a : Tuple = hidden_act
__a : Union[str, Any] = hidden_dropout_prob
__a : Dict = attention_probs_dropout_prob
__a : Optional[int] = max_position_embeddings
__a : Dict = type_vocab_size
__a : Any = type_sequence_label_size
__a : Dict = initializer_range
__a : Optional[Any] = num_labels
__a : Optional[Any] = num_choices
__a : Union[str, Any] = relative_attention
__a : List[str] = position_biased_input
__a : List[Any] = pos_att_type
__a : Tuple = scope
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : List[Any] = None
if self.use_input_mask:
__a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__a : Any = None
if self.use_token_type_ids:
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Optional[int] = None
__a : int = None
__a : Dict = None
if self.use_labels:
__a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__a : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Dict = DebertaVaModel(config=__a )
model.to(__a )
model.eval()
__a : Optional[int] = model(__a , attention_mask=__a , token_type_ids=__a )[0]
__a : str = model(__a , token_type_ids=__a )[0]
__a : Optional[int] = model(__a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : int = DebertaVaForMaskedLM(config=__a )
model.to(__a )
model.eval()
__a : List[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[Any] = self.num_labels
__a : List[Any] = DebertaVaForSequenceClassification(__a )
model.to(__a )
model.eval()
__a : Any = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__a )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Any = self.num_labels
__a : Dict = DebertaVaForTokenClassification(config=__a )
model.to(__a )
model.eval()
__a : str = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : List[str] = DebertaVaForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
__a : str = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = DebertaVaForMultipleChoice(config=__a )
model.to(__a )
model.eval()
__a : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : int = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ = True
A_ = False
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = DebertaVaModelTester(self )
__a : List[str] = ConfigTester(self , config_class=__a , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : str = DebertaVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
        __a : Optional[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__a : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__a : int = model(__a , attention_mask=__a )[0]
# compare the actual values for a slice.
__a : str = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 294
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = CycleDiffusionPipeline
A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
A_ = PipelineTesterMixin.required_optional_params - {"latents"}
A_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__a : Dict = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , num_train_timesteps=1000 , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
__a : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__a : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__a : int = CLIPTextModel(__a )
__a : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__a : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __UpperCAmelCase ( self , __a , __a=0 ):
'''simple docstring'''
__a : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
__a : Optional[int] = image / 2 + 0.5
if str(__a ).startswith('mps' ):
__a : Dict = torch.manual_seed(__a )
else:
__a : Dict = torch.Generator(device=__a ).manual_seed(__a )
__a : Optional[Any] = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__a : Tuple = self.get_dummy_components()
__a : int = CycleDiffusionPipeline(**__a )
__a : str = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__a : int = self.get_dummy_inputs(__a )
__a : Any = pipe(**__a )
__a : List[str] = output.images
__a : List[str] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__a : Optional[Any] = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.get_dummy_components()
for name, module in components.items():
if hasattr(__a , 'half' ):
__a : Dict = module.half()
__a : Optional[int] = CycleDiffusionPipeline(**__a )
__a : str = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__a : Dict = self.get_dummy_inputs(__a )
__a : Any = pipe(**__a )
__a : int = output.images
__a : Tuple = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__a : Any = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_inference_batch_single_identical()
@skip_mps
def __UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __UpperCAmelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
__a : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
__a : Optional[int] = init_image.resize((512, 512) )
__a : List[Any] = 'CompVis/stable-diffusion-v1-4'
__a : List[str] = DDIMScheduler.from_pretrained(__a , subfolder='scheduler' )
        __a : Optional[Any] = CycleDiffusionPipeline.from_pretrained(
            __a , scheduler=__a , safety_checker=__a , torch_dtype=torch.float16 , revision='fp16' )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__a : Union[str, Any] = 'A black colored car'
__a : int = 'A blue colored car'
__a : int = torch.manual_seed(0 )
__a : List[Any] = pipe(
prompt=__a , source_prompt=__a , image=__a , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__a , output_type='np' , )
__a : List[Any] = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
__a : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
__a : Optional[Any] = init_image.resize((512, 512) )
__a : int = 'CompVis/stable-diffusion-v1-4'
__a : Tuple = DDIMScheduler.from_pretrained(__a , subfolder='scheduler' )
__a : Tuple = CycleDiffusionPipeline.from_pretrained(__a , scheduler=__a , safety_checker=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__a : Union[str, Any] = 'A black colored car'
__a : Optional[Any] = 'A blue colored car'
__a : Optional[int] = torch.manual_seed(0 )
__a : Optional[Any] = pipe(
prompt=__a , source_prompt=__a , image=__a , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__a , output_type='np' , )
__a : Tuple = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 294
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    # torch.compile wraps modules in an OptimizedModule; older torch has no _dynamo.
    if is_torch_version('<', '2.0.0') or not hasattr(torch, '_dynamo'):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, 'forward')
        original_forward = model.__dict__.pop('_original_forward', None)
        if original_forward is not None:
            while hasattr(forward, '__wrapped__'):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, '_converted_to_transformer_engine', False):
            convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
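# Unwrapping sketch (illustrative): a DataParallel wrapper is peeled off, giving
# back the very module object that was wrapped. (DDP would behave the same but
# needs an initialized process group to construct.)
_inner = torch.nn.Linear(2, 2)
assert extract_model_from_parallel(torch.nn.DataParallel(_inner)) is _inner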
def wait_for_everyone():
    PartialState().wait_for_everyone()
def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
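# Usage sketch (illustrative): keys are upper-cased on entry and removed on exit.
with patch_environment(master_port='29501'):
    assert os.environ['MASTER_PORT'] == '29501'
assert 'MASTER_PORT' not in os.environ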
def get_pretty_name(obj):
    if not hasattr(obj, '__qualname__') and not hasattr(obj, '__name__'):
        obj = getattr(obj, '__class__', obj)
    if hasattr(obj, '__qualname__'):
        return obj.__qualname__
    if hasattr(obj, '__name__'):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
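# Behavior sketch: nested dicts merge recursively; scalar values from `source`
# overwrite those already in `destination`.
assert merge_dicts({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3}) == {'a': {'y': 2, 'x': 1}, 'b': 3}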
def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(('localhost', port)) == 0
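# Hypothetical helper built on is_port_in_use (not part of accelerate): probe
# upwards from the default rendezvous port 29500 until a free one is found.
def find_free_port(start: int = 29500) -> int:
    port = start
    while is_port_in_use(port):
        port += 1
    return port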
| 294
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
A_ = XLMTokenizer
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__a : Optional[int] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__a : Optional[int] = dict(zip(__a , range(len(__a ) ) ) )
__a : List[Any] = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
__a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(__a ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(__a ) )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : str = 'lower newer'
__a : Union[str, Any] = 'lower newer'
return input_text, output_text
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = XLMTokenizer(self.vocab_file , self.merges_file )
__a : List[Any] = 'lower'
__a : Any = ['low', 'er</w>']
__a : Tuple = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
__a : Optional[int] = tokens + ['<unk>']
__a : int = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' )
__a : str = tokenizer.encode('sequence builders' , add_special_tokens=__a )
__a : Dict = tokenizer.encode('multi-sequence build' , add_special_tokens=__a )
__a : int = tokenizer.build_inputs_with_special_tokens(__a )
__a : Optional[int] = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 294
|
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
| 294
| 1
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    group = parser.add_argument_group('quant_trainer arguments')
group.add_argument('--wprec' , type=_SCREAMING_SNAKE_CASE , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=_SCREAMING_SNAKE_CASE , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=_SCREAMING_SNAKE_CASE , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=_SCREAMING_SNAKE_CASE , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=_SCREAMING_SNAKE_CASE , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=_SCREAMING_SNAKE_CASE , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def set_default_quantizers(args):
    if args.calibrator == "max":
        calib_method = 'max'
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('Specify --percentile when using percentile calibrator')
        calib_method = 'histogram'
    elif args.calibrator == "mse":
        calib_method = 'histogram'
    else:
        raise ValueError(f"""Invalid calibrator {args.calibrator}""")
    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple=False , _SCREAMING_SNAKE_CASE : Optional[int]=False ):
logger.info('Configuring Model for Quantization' )
logger.info(F"""using quantization package {pytorch_quantization.__file__}""" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(_SCREAMING_SNAKE_CASE , ['embeddings'] , which='weight' , _disabled=_SCREAMING_SNAKE_CASE )
if args.quant_disable:
set_quantizer_by_name(_SCREAMING_SNAKE_CASE , [''] , _disabled=_SCREAMING_SNAKE_CASE )
if args.quant_disable_keyword:
set_quantizer_by_name(_SCREAMING_SNAKE_CASE , args.quant_disable_keyword , _disabled=_SCREAMING_SNAKE_CASE )
if args.quant_disable_layer_module:
set_quantizer_by_name(_SCREAMING_SNAKE_CASE , [r'layer.\d+.' + args.quant_disable_layer_module] , _disabled=_SCREAMING_SNAKE_CASE )
if args.quant_enable_layer_module:
set_quantizer_by_name(_SCREAMING_SNAKE_CASE , [r'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_SCREAMING_SNAKE_CASE )
if args.recalibrate_weights:
recalibrate_weights(_SCREAMING_SNAKE_CASE )
if args.fuse_qkv:
fuse_qkv(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if args.clip_gelu:
clip_gelu(_SCREAMING_SNAKE_CASE , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"""{name:80}: {module}""" )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int ):
def fusea(_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str ):
for mod in [qq, qk, qv]:
if not hasattr(_SCREAMING_SNAKE_CASE , '_amax' ):
print(' WARNING: NO AMAX BUFFER' )
return
__a : int = qq._amax.detach().item()
__a : Tuple = qk._amax.detach().item()
__a : Union[str, Any] = qv._amax.detach().item()
__a : Optional[int] = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
qq._amax.fill_(_SCREAMING_SNAKE_CASE )
qk._amax.fill_(_SCREAMING_SNAKE_CASE )
qv._amax.fill_(_SCREAMING_SNAKE_CASE )
logger.info(F""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(F"""FUSE_QKV: {name:{name_width}}""" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
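# Numeric illustration of the rule fusea applies above (made-up amax values):
# q, k and v quantizers all adopt the shared maximum so a single scale factor
# can serve the fused QKV kernel.
_q_amax, _k_amax, _v_amax = 2.1, 3.7, 1.4
_shared = max(_q_amax, _k_amax, _v_amax)
assert (_shared, _shared, _shared) == (3.7, 3.7, 3.7)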
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
for name, mod in model.named_modules():
if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
__a : List[str] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=_SCREAMING_SNAKE_CASE )
__a : Any = mod._input_quantizer._amax.data.detach().item()
logger.info(F"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ):
for name, mod in model.named_modules():
if hasattr(_SCREAMING_SNAKE_CASE , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
__a : str = mod.weight.shape[0]
__a : int = mod._weight_quantizer._amax.detach()
__a : Optional[Any] = torch.ones(_SCREAMING_SNAKE_CASE , dtype=amax.dtype , device=amax.device ) * amax
print(F"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
for name, mod in model.named_modules():
if hasattr(_SCREAMING_SNAKE_CASE , '_weight_quantizer' ):
            if not hasattr(mod._weight_quantizer, '_amax'):
                print(f"""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""")
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
__a : str = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
__a : List[str] = set(range(len(mod.weight.size() ) ) ) - axis_set
__a : List[Any] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_SCREAMING_SNAKE_CASE , keepdims=_SCREAMING_SNAKE_CASE ).detach()
logger.info(F"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
__a : Optional[int] = amax
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any]=25 , _SCREAMING_SNAKE_CASE : List[str]=180 , _SCREAMING_SNAKE_CASE : Optional[int]=None ):
if ignore is None:
__a : Optional[int] = []
elif not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Any = [ignore]
__a : Optional[Any] = 0
for name, mod in model.named_modules():
if not hasattr(_SCREAMING_SNAKE_CASE , 'weight' ):
continue
__a : Union[str, Any] = max(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) )
for name, mod in model.named_modules():
__a : Tuple = getattr(_SCREAMING_SNAKE_CASE , '_input_quantizer' , _SCREAMING_SNAKE_CASE )
__a : Tuple = getattr(_SCREAMING_SNAKE_CASE , '_weight_quantizer' , _SCREAMING_SNAKE_CASE )
if not hasattr(_SCREAMING_SNAKE_CASE , 'weight' ):
continue
if type(_SCREAMING_SNAKE_CASE ) in ignore:
continue
if [True for s in ignore if type(_SCREAMING_SNAKE_CASE ) is str and s in name]:
continue
__a : Optional[int] = F"""Act:{input_q.extra_repr()}"""
__a : Union[str, Any] = F"""Wgt:{weight_q.extra_repr()}"""
__a : Any = F"""{name:{name_width}} {act_str} {wgt_str}"""
if len(_SCREAMING_SNAKE_CASE ) <= line_width:
logger.info(_SCREAMING_SNAKE_CASE )
else:
logger.info(F"""{name:{name_width}} {act_str}""" )
logger.info(F"""{" ":{name_width}} {wgt_str}""" )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
__a : List[str] = 0
for name, mod in model.named_modules():
if isinstance(_SCREAMING_SNAKE_CASE , pytorch_quantization.nn.TensorQuantizer ):
print(F"""{name:80} {mod}""" )
count += 1
print(F"""{count} TensorQuantizers found in model""" )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Dict ):
__a : Tuple = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if quantizer_mod is not None:
assert hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
logger.warning(F"""{name} has no {quantizer}""" )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any="both" , **_SCREAMING_SNAKE_CASE : int ):
__a : Union[str, Any] = F"""Warning: changing {which} quantizers of {name:{qname_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
if which in ["input", "both"]:
set_quantizer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '_input_quantizer' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if which in ["weight", "both"]:
set_quantizer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '_weight_quantizer' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
logger.info(_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : List[Any] ):
for name, mod in model.named_modules():
if hasattr(_SCREAMING_SNAKE_CASE , '_input_quantizer' ) or hasattr(_SCREAMING_SNAKE_CASE , '_weight_quantizer' ):
for n in names:
if re.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
set_quantizers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
elif name.endswith('_quantizer' ):
for n in names:
if re.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Dict = F"""Warning: changing {name:{name_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
logger.info(_SCREAMING_SNAKE_CASE )
| 294
|
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(root: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)
    if not is_valid_tree(root):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.')
    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )
    return is_binary_search_tree_recursive_check(root, -float('inf'), float('inf'))
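# Worked example: every node must satisfy left_bound < data < right_bound. In the
# second tree the node 2 sits in the right subtree of 5, so it must obey
# 5 < 2 < 8, which fails.
_valid = TreeNode(5.0, TreeNode(3.0), TreeNode(8.0, TreeNode(6.0)))
_invalid = TreeNode(5.0, TreeNode(3.0), TreeNode(8.0, TreeNode(2.0)))
assert is_binary_search_tree(_valid) and not is_binary_search_tree(_invalid)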
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294
| 1
|
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
| 294
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 294
| 1
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
__a : int = self.diffusers_dir
shutil.copy(
os.path.join(__a , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
def __UpperCAmelCase ( self , __a , __a , __a , __a=None ):
'''simple docstring'''
__a : List[Any] = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
__a : Tuple = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        __a : List[str] = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
__a : Tuple = black.format_str(__a , mode=__a )
__a : int = os.path.join(self.diffusers_dir , 'new_code.py' )
with open(__a , 'w' , newline='\n' ) as f:
f.write(__a )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__a ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__a )
with open(__a , 'r' ) as f:
self.assertTrue(f.read() , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , __a , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , __a ) , )
# Copy consistency with a really long name
__a : str = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , __a , __a ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , __a , overwrite_result=re.sub('DDPM' , 'Test' , __a ) , )
| 294
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub('<n>', '', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 294
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self , __a , __a=3 , __a=32 , __a=3 , __a=10 , __a=[10, 20, 30, 40] , __a=[1, 1, 2, 1] , __a=True , __a=True , __a="relu" , __a=3 , __a=None , ):
'''simple docstring'''
__a : str = parent
__a : Union[str, Any] = batch_size
__a : int = image_size
__a : List[str] = num_channels
__a : Union[str, Any] = embeddings_size
__a : Tuple = hidden_sizes
__a : Union[str, Any] = depths
__a : Optional[int] = is_training
__a : Union[str, Any] = use_labels
__a : Optional[Any] = hidden_act
__a : Dict = num_labels
__a : Tuple = scope
__a : Tuple = len(__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Any = self.get_config()
return config, pixel_values
def __UpperCAmelCase ( self ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
__a : Dict = FlaxRegNetModel(config=__a )
__a : List[str] = model(__a )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
__a : Dict = self.num_labels
__a : Union[str, Any] = FlaxRegNetForImageClassification(config=__a )
__a : Any = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.prepare_config_and_inputs()
__a , __a : int = config_and_inputs
__a : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
A_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = FlaxRegNetModelTester(self )
__a : str = ConfigTester(self , config_class=__a , has_text_modality=__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self ):
'''simple docstring'''
return
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[Any] = model_class(__a )
__a : Union[str, Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[Any] = [*signature.parameters.keys()]
__a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__a , __a , __a ):
__a : List[Any] = model_class(__a )
__a : str = model(**self._prepare_for_class(__a , __a ) )
__a : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : Dict = True
check_hidden_states_output(__a , __a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__a : List[str] = self._prepare_for_class(__a , __a )
__a : List[Any] = model_class(__a )
@jax.jit
def model_jitted(__a , **__a ):
return model(pixel_values=__a , **__a )
with self.subTest('JIT Enabled' ):
__a : Optional[Any] = model_jitted(**__a ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__a : List[str] = model_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase ():
__a : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
__a : List[Any] = self.default_image_processor
__a : Tuple = prepare_img()
__a : Dict = image_processor(images=__a , return_tensors='np' )
__a : Tuple = model(**__a )
# verify the logits
__a : Dict = (1, 1000)
self.assertEqual(outputs.logits.shape , __a )
__a : int = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
| 294
|
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = 1 - µA(x)
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), 1 - µB(x))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, µA(x) + µB(x)]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, µA(x) - µB(x)]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
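# Spot check of the algebraic identities above at a single membership value
# (illustrative, independent of skfuzzy): with muA = 0.6 and muB = 0.3,
# algebraic sum = 0.6 + 0.3 - 0.18 = 0.72 and algebraic product = 0.18.
mu_a, mu_b = 0.6, 0.3
assert abs((mu_a + mu_b - mu_a * mu_b) - 0.72) < 1e-9
assert abs(mu_a * mu_b - 0.18) < 1e-9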
| 294
| 1
|
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
__lowercase : List[str] = sys.version_info >= (3, 10)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Any=None ):
return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE )
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default='toto', metadata={'help': 'help message'})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = 'titi'
    toto = 'toto'


class MixedTypeEnum(Enum):
    titi = 'titi'
    toto = 'toto'
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = 'toto'

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = 'toto'

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={'help': 'help message'})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: 'BasicEnum' = field()
    opt: 'Optional[bool]' = None
    baz: 'str' = field(default='toto', metadata={'help': 'help message'})
    foo_str: 'List[str]' = list_field(default=['Hallo', 'Bonjour', 'Hello'])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={'help': 'help message'})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
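

# Usage sketch (illustrative, not part of the test suite): HfArgumentParser
# turns each dataclass field above into a CLI flag, e.g. for BasicExample:
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(
#       ['--foo', '1', '--bar', '0.5', '--baz', 'quux', '--flag', 'true'])
#   assert example.foo == 1 and example.bar == 0.5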
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper to check pseudo-equality of parsed arguments on ArgumentParser instances."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != 'container'}
            yy = {k: v for k, v in vars(y).items() if k != 'container'}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('choices', None) and yy.get('choices', None):
                for expected_choice in yy['choices'] + xx['choices']:
                    self.assertEqual(xx['type'](expected_choice), yy['type'](expected_choice))
                del xx['type'], yy['type']
            self.assertEqual(xx, yy)
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = HfArgumentParser(__a )
__a : str = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__a , required=__a )
expected.add_argument('--bar' , type=__a , required=__a )
expected.add_argument('--baz' , type=__a , required=__a )
expected.add_argument('--flag' , type=__a , default=__a , const=__a , nargs='?' )
self.argparsersEqual(__a , __a )
__a : Dict = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((__a) , ) : Optional[int] = parser.parse_args_into_dataclasses(__a , look_for_args_file=__a )
self.assertFalse(example.flag )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = HfArgumentParser(__a )
__a : List[Any] = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=__a )
expected.add_argument('--baz' , default='toto' , type=__a , help='help message' )
self.argparsersEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__a , default=__a , const=__a , nargs='?' )
expected.add_argument('--baz' , type=__a , default=__a , const=__a , nargs='?' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=__a , dest='baz' )
expected.add_argument('--opt' , type=__a , default=__a )
__a : Optional[int] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__a )
for dataclass_type in dataclass_types:
__a : Any = HfArgumentParser(__a )
self.argparsersEqual(__a , __a )
__a : List[Any] = parser.parse_args([] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
__a : List[str] = parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
__a : List[str] = parser.parse_args(['--foo', '--baz'] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
__a : str = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
__a : Optional[Any] = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = HfArgumentParser(__a )
__a : int = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(__a , __a )
__a : Dict = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
__a : List[str] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
__a : Optional[Any] = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
__a : Any = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
__a : Dict = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
__a : str = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __UpperCAmelCase ( self ):
'''simple docstring'''
@dataclass
class __UpperCamelCase :
A_ = "toto"
__a : Any = HfArgumentParser(__a )
__a : str = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(__a , __a )
__a : Any = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
__a : Union[str, Any] = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
__a : Union[str, Any] = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = HfArgumentParser(__a )
__a : int = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=__a )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=__a )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__a )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=__a )
self.argparsersEqual(__a , __a )
__a : str = parser.parse_args([] )
self.assertEqual(
__a , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
__a : Dict = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(__a , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = argparse.ArgumentParser()
expected.add_argument('--foo' , default=__a , type=__a )
expected.add_argument('--bar' , default=__a , type=__a , help='help message' )
expected.add_argument('--baz' , default=__a , type=__a )
expected.add_argument('--ces' , nargs='+' , default=[] , type=__a )
expected.add_argument('--des' , nargs='+' , default=[] , type=__a )
__a : List[Any] = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__a )
for dataclass_type in dataclass_types:
__a : Any = HfArgumentParser(__a )
self.argparsersEqual(__a , __a )
__a : Any = parser.parse_args([] )
self.assertEqual(__a , Namespace(foo=__a , bar=__a , baz=__a , ces=[] , des=[] ) )
__a : Optional[int] = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(__a , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = HfArgumentParser(__a )
__a : int = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=__a , required=__a )
expected.add_argument('--required_str' , type=__a , required=__a )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__a , )
self.argparsersEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = HfArgumentParser(__a )
__a : List[str] = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__a , required=__a )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__a , )
expected.add_argument('--opt' , type=__a , default=__a )
expected.add_argument('--baz' , default='toto' , type=__a , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__a )
self.argparsersEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = HfArgumentParser(__a )
__a : int = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
__a : Dict = parser.parse_dict(__a )[0]
__a : Optional[Any] = BasicExample(**__a )
self.assertEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = HfArgumentParser(__a )
__a : Optional[Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(__a , parser.parse_dict , __a , allow_extra_keys=__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = HfArgumentParser(__a )
__a : Optional[Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__a : Optional[Any] = os.path.join(__a , 'temp_json' )
os.mkdir(__a )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(__a , __a )
__a : Any = parser.parse_json_file(Path(temp_local_path + '.json' ) )[0]
__a : Optional[int] = BasicExample(**__a )
self.assertEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = HfArgumentParser(__a )
__a : Union[str, Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__a : List[str] = os.path.join(__a , 'temp_yaml' )
os.mkdir(__a )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(__a , __a )
__a : List[str] = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
__a : Optional[int] = BasicExample(**__a )
self.assertEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = HfArgumentParser(__a )
self.assertIsNotNone(__a )
| 294
|
'''simple docstring'''
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string `s`."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in `n`."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            # The incoming digit is no smaller than the one sliding out,
            # so extend the current window by one position.
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            # The window can only get worse; score it and restart 13 digits ahead.
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
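

# A brute-force cross-check for the sliding-window version above; it scores
# every 13-digit window directly (O(13 * n), fine for a 1000-digit input).
# `solution_brute_force` is an added helper, not part of the original solution.
def solution_brute_force(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))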
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 294
| 1
|
'''simple docstring'''
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant, expected in [0.04, 0.06]
        window_size : size of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value')

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[np.ndarray, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Threshold on the corner response; can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect('path_to_image')
    cv2.imwrite('detect.png', color_img)
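    # Illustrative alternative (not in the original script): mark each corner
    # with a small circle instead of recoloring a single pixel.
    # for x, y, _r in corner_list:
    #     cv2.circle(color_img, (x, y), radius=3, color=(0, 0, 255), thickness=1)
    # cv2.imwrite('detect_circles.png', color_img)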
| 294
|
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__a : Tuple = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
__a : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) )
__a : Tuple = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__a : Dict = {'unk_token': '<unk>'}
__a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__a ) )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Tuple = 'lower newer'
__a : Tuple = 'lower newer'
return input_text, output_text
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a : str = 'lower newer'
__a : Tuple = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
__a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
__a : List[str] = tokens + [tokenizer.unk_token]
__a : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__a : List[Any] = self.get_tokenizer()
__a : List[str] = self.get_rust_tokenizer(add_prefix_space=__a )
__a : Any = 'lower newer'
# Testing tokenization
__a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a )
__a : Dict = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
__a : int = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__a : Tuple = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
__a : Tuple = self.get_rust_tokenizer(add_prefix_space=__a )
__a : Union[str, Any] = tokenizer.encode(__a , add_prefix_space=__a )
__a : int = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
__a : Any = tokens + [rust_tokenizer.unk_token]
__a : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def __UpperCAmelCase ( self , *__a , **__a ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self , __a=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a : Optional[int] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
__a : List[Any] = 'This is a simple input'
__a : Tuple = ['This is a simple input 1', 'This is a simple input 2']
__a : Tuple = ('This is a simple input', 'This is a pair')
__a : str = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
__a : str = 'This is a simple input'
__a : Any = ['This is a simple input looooooooong', 'This is a simple input']
__a : Optional[int] = ('This is a simple input', 'This is a pair')
__a : Optional[Any] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
__a : int = tokenizer.pad_token_id
__a : List[Any] = tokenizer(__a , padding='max_length' , max_length=30 , return_tensors='np' )
__a : Union[str, Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
__a : Optional[Any] = tokenizer(*__a , padding='max_length' , max_length=60 , return_tensors='np' )
__a : List[Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = '$$$'
__a : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
__a : Union[str, Any] = 'This is a simple input'
__a : List[Any] = ['This is a simple input 1', 'This is a simple input 2']
__a : List[Any] = tokenizer.bos_token_id
__a : List[str] = tokenizer(__a )
__a : Optional[Any] = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__a : Any = tokenizer.decode(out_s.input_ids )
__a : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
__a : Optional[int] = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
__a : Tuple = '\nif len_a > len_b: result = a\nelse: result = b'
__a : Optional[int] = tokenizer.encode(__a )
__a : Union[str, Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
__a : Tuple = tokenizer.decode(__a , truncate_before_pattern=__a )
self.assertEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
| 294
| 1
|
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
    'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
    ' StableDiffusionInpaintPipeline` instead.'
)
| 294
|
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """
    Plays FizzBuzz, returning the terms from `number` up to `iterations`
    as one space-separated string.

    >>> fizz_buzz(1, 7)
    '1 2 Fizz 4 Buzz Fizz 7 '
    """
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')
    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += 'Fizz'
        if number % 5 == 0:
            out += 'Buzz'
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += ' '
    return out
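

# An equivalent single-term formulation, shown for comparison; `fizz_buzz_term`
# is an added illustration, not part of the original exercise.
def fizz_buzz_term(number: int) -> str:
    """
    >>> fizz_buzz_term(15)
    'FizzBuzz'
    >>> fizz_buzz_term(4)
    '4'
    """
    return ('Fizz' * (number % 3 == 0) + 'Buzz' * (number % 5 == 0)) or str(number)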
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 294
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'], model_result['ss']):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result)
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = 'sshleifer/tiny-gpt2'
__a : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__a : Optional[int] = PyTorchBenchmark(__a )
__a : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = 'sgugger/tiny-distilbert-classification'
__a : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , only_pretrain_model=__a , )
__a : List[Any] = PyTorchBenchmark(__a )
__a : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = 'sshleifer/tiny-gpt2'
__a : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , torchscript=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__a : str = PyTorchBenchmark(__a )
__a : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = 'sshleifer/tiny-gpt2'
__a : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , fpaa=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__a : Any = PyTorchBenchmark(__a )
__a : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = 'sshleifer/tiny-gpt2'
__a : List[Any] = AutoConfig.from_pretrained(__a )
# set architectures equal to `None`
__a : List[Any] = None
__a : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__a : Dict = PyTorchBenchmark(__a , configs=[config] )
__a : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = 'sshleifer/tiny-gpt2'
__a : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__a : Optional[Any] = PyTorchBenchmark(__a )
__a : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = 'sshleifer/tiny-gpt2'
__a : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__a , multi_process=__a , )
__a : List[str] = PyTorchBenchmark(__a )
__a : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = 'sshleifer/tiny-gpt2'
__a : str = AutoConfig.from_pretrained(__a )
__a : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__a : int = PyTorchBenchmark(__a , configs=[config] )
__a : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = 'sshleifer/tinier_bart'
__a : Dict = AutoConfig.from_pretrained(__a )
__a : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__a : List[str] = PyTorchBenchmark(__a , configs=[config] )
__a : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = 'sshleifer/tiny-gpt2'
__a : Dict = AutoConfig.from_pretrained(__a )
__a : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__a : List[Any] = PyTorchBenchmark(__a , configs=[config] )
__a : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = 'sshleifer/tinier_bart'
__a : int = AutoConfig.from_pretrained(__a )
__a : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
__a : List[Any] = PyTorchBenchmark(__a , configs=[config] )
__a : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
__a : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , save_to_csv=__a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__a , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(__a , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(__a , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(__a , 'train_time.csv' ) , env_info_csv_file=os.path.join(__a , 'env.csv' ) , multi_process=__a , )
__a : List[Any] = PyTorchBenchmark(__a )
benchmark.run()
self.assertTrue(Path(os.path.join(__a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__a , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__a , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__a , 'env.csv' ) ).exists() )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__a ):
self.assertTrue(hasattr(__a , 'sequential' ) )
self.assertTrue(hasattr(__a , 'cumulative' ) )
self.assertTrue(hasattr(__a , 'current' ) )
self.assertTrue(hasattr(__a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__a : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__a , 'log.txt' ) , log_print=__a , trace_memory_line_by_line=__a , multi_process=__a , )
__a : List[Any] = PyTorchBenchmark(__a )
__a : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__a , 'log.txt' ) ).exists() )
| 294
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , 'do_resize' ) )
self.assertTrue(hasattr(__a , 'size' ) )
self.assertTrue(hasattr(__a , 'apply_ocr' ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__a : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
__a : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , __a )
self.assertIsInstance(encoding.boxes , __a )
# Test batched
__a : Any = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__a : Tuple = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
__a : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__a : List[str] = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = LayoutLMvaImageProcessor()
from datasets import load_dataset
__a : str = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__a : Tuple = Image.open(ds[0]['file'] ).convert('RGB' )
__a : Optional[Any] = image_processing(__a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__a : Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__a : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __a )
self.assertListEqual(encoding.boxes , __a )
# with apply_OCR = False
__a : List[Any] = LayoutLMvaImageProcessor(apply_ocr=__a )
__a : List[Any] = image_processing(__a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 294
| 1
|
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')
    # positional
    parser.add_argument(
        'training_script',
        type=str,
        help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ),
    )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
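

# Example invocation, assuming this launcher is saved as xla_spawn.py
# (illustrative):
#   python xla_spawn.py --num_cores 8 path/to/train_script.py --learning_rate 3e-5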
if __name__ == "__main__":
    main()
| 294
|
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
    'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}


class ErnieMConfig(PretrainedConfig):
    model_type = 'ernie_m'
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = 'gelu',
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
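

# Usage sketch (illustrative): attribute_map transparently remaps Ernie-M
# attribute names onto the standard transformers ones, so:
#   config = ErnieMConfig(hidden_size=256, num_hidden_layers=4)
#   config.num_classes   # resolves to config.num_labels
#   config.dropout       # resolves to config.classifier_dropout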
| 294
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_longt5'] = [
        'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LongT5EncoderModel',
        'LongT5ForConditionalGeneration',
        'LongT5Model',
        'LongT5PreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_longt5'] = [
        'FlaxLongT5ForConditionalGeneration',
        'FlaxLongT5Model',
        'FlaxLongT5PreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
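
# With the lazy pattern above, importing the package stays cheap: the heavy
# torch/flax submodules load only on first attribute access (illustrative):
#   from transformers.models.longt5 import LongT5Config  # config module only
#   from transformers.models.longt5 import LongT5Model   # now imports modeling_longt5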
| 294
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear module with a trainable low-rank adapter (test helper)."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
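

# Illustrative use (mirrors the training test further below): wrapping a frozen
# projection so that only the two small adapter matrices receive gradients.
#   layer.q_proj = LoRALayer(layer.q_proj, rank=16)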
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
    # We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Models and tokenizer
__a : int = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.model_abit.config
self.assertTrue(hasattr(__a , 'quantization_config' ) )
__a : Union[str, Any] = config.to_dict()
__a : Tuple = config.to_diff_dict()
__a : Tuple = config.to_json_string()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
__a : List[Any] = self.model_fpaa.get_memory_footprint()
__a : List[Any] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__a : Tuple = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __UpperCAmelCase ( self ):
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__a , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='pt' )
__a : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = BitsAndBytesConfig()
__a : Tuple = True
__a : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__a , device_map='auto' )
__a : str = self.tokenizer(self.input_text , return_tensors='pt' )
__a : List[Any] = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
def __UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(__a ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = BitsAndBytesConfig()
with self.assertRaises(__a ):
__a : List[str] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__a , load_in_abit=__a , device_map='auto' , bnb_abit_quant_type='nf4' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(__a ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(__a ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(__a ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(__a ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(__a ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__a : List[str] = self.tokenizer(self.input_text , return_tensors='pt' )
__a : Optional[int] = self.model_fpaa.to(torch.floataa )
__a : Tuple = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__a : List[Any] = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__a : Union[str, Any] = self.model_fpaa.half()
# Check this does not throw an error
__a : Union[str, Any] = self.model_fpaa.float()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=__a , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
@classmethod
def __UpperCAmelCase ( cls ):
'''simple docstring'''
__a : Any = 't5-small'
__a : Tuple = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__a : int = AutoTokenizer.from_pretrained(cls.model_name )
__a : Union[str, Any] = 'Translate in German: Hello, my dog is cute'
def __UpperCAmelCase ( self ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from transformers import TaForConditionalGeneration
__a : Optional[int] = TaForConditionalGeneration._keep_in_fpaa_modules
__a : List[str] = None
# test with `t5-small`
__a : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
__a : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : Any = model.generate(**__a )
# test with `flan-t5-small`
__a : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__a , device_map='auto' )
__a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : List[Any] = model.generate(**__a )
__a : Optional[int] = modules
def __UpperCAmelCase ( self ):
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__a : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__a : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : List[str] = model.generate(**__a )
# test with `flan-t5-small`
__a : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__a , device_map='auto' )
__a : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__a : int = model.generate(**__a )
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# model_name
__a : List[Any] = 'bigscience/bloom-560m'
__a : Union[str, Any] = 't5-small'
# Different types of model
__a : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
# Sequence classification model
__a : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=__a , device_map='auto' )
# CausalLM model
__a : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='auto' )
# Seq2seq model
__a : Any = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=__a , device_map='auto' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
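

# A minimal usage sketch of the 4-bit loading path these tests exercise (added for
# illustration; the checkpoint name is an arbitrary example, and a CUDA GPU plus
# `bitsandbytes` are assumed to be available).
def generate_4bit(prompt: str, model_name: str = "facebook/opt-350m") -> str:
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # load_in_4bit=True quantizes the linear layers at load time; device_map="auto"
    # lets accelerate place the quantized weights on the available GPU(s).
    model = AutoModelForCausalLM.from_pretrained(model_name, load_in_4bit=True, device_map="auto")
    inputs = tokenizer(prompt, return_tensors="pt").to(0)
    output_ids = model.generate(**inputs, max_new_tokens=10)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)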
"""Recursively check whether a binary tree is a binary search tree (BST)."""
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
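
# Illustrative check (added; not in the original module): a small valid BST and an
# invalid one built from the dataclass above.
if __name__ == "__main__":
    valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    assert is_binary_search_tree(valid)
    invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
    assert not is_binary_search_tree(invalid)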
"""XLM-Roberta text encoder with a projection head, as used by AltDiffusion-style pipelines."""
from dataclasses import dataclass
from typing import Optional, Tuple

import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput


@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
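

# Added smoke test (not part of the original module): build a tiny config, run one
# forward pass, and confirm the projection head's output width equals project_dim.
if __name__ == "__main__":
    tiny_config = RobertaSeriesConfig(
        vocab_size=100, hidden_size=32, num_hidden_layers=2, num_attention_heads=2,
        intermediate_size=64, project_dim=16,
    )
    tiny_model = RobertaSeriesModelWithTransformation(tiny_config)
    out = tiny_model(input_ids=torch.tensor([[5, 6, 7]]))
    assert out.projection_state.shape == (1, 3, 16)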
"""Miscellaneous utilities: model unwrapping, synchronization, saving, and environment patching."""
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Block until all processes have reached this point."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk. Use in place of `torch.save()`."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Add each keyword argument to `os.environ` and remove it again on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a pretty name from `obj`."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check if a port is in use on `localhost`."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
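

# Added illustration of two helpers above: `patch_environment` scopes environment
# variables to a block, and `merge_dicts` deep-merges nested configuration dicts.
# (Assumes MASTER_PORT is not already set in the surrounding environment.)
if __name__ == "__main__":
    with patch_environment(master_port="29501"):
        assert os.environ["MASTER_PORT"] == "29501"
    assert "MASTER_PORT" not in os.environ

    merged = merge_dicts({"optim": {"lr": 1e-4}}, {"optim": {"betas": (0.9, 0.999)}})
    assert merged == {"optim": {"lr": 1e-4, "betas": (0.9, 0.999)}}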
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
    else:
        pass  # RoCBert does not have a fast tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider",
            "<unk>", "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # Overridden as a no-op: pretokenized inputs do not mix well with the
        # byte-level BPE used here.
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests: padding to max_length without a pad token must raise
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"

        input_ids = tokenizer.encode(text)
        truncation_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncation_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    def test_padding_different_model_input_name(self):
        # Intentionally overridden as a no-op in this test suite.
        pass
"""PyTorch MobileNetV1 model."""
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation: Union[bool, str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            # Pointwise 1x1 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
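

# Illustrative usage (a sketch that would normally live outside this modeling file):
# run image classification with a pretrained MobileNetV1 checkpoint. Requires Pillow,
# requests, and network access; the checkpoint name comes from the constants above.
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

    image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
    classifier = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
    logits = classifier(**processor(images=image, return_tensors="pt")).logits
    print(classifier.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"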
"""Deprecated GLUE/XNLI metrics, kept for backward compatibility."""
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name in ("qnli", "rte", "wnli", "hans"):
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
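

# Added illustration of the deprecated entry point (emits a FutureWarning):
if __name__ == "__main__":
    import numpy as np

    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    # MRPC reports accuracy, F1, and their average.
    print(glue_compute_metrics("mrpc", preds, labels))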
"""Download full-resolution images returned by a Google Images search."""
import json
import os
import re
import sys
import urllib.request

import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for `query` and download up to `max_images` results."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode("unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode("unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
"""`diffusers-cli env` command: print environment information for bug reports."""
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
"""
Project Euler Problem 22: Names scores.
Sum, over all names in the file, of (alphabetical position of the name) times
(sum of the alphabetical values of its letters).
"""
import os


def solution() -> int:
    total_score = 0
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    for i, name in enumerate(names):
        name_score = 0
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score

    return total_score


if __name__ == "__main__":
    print(solution())
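
# Worked example from the Project Euler 22 statement (added for illustration):
# COLIN is worth 3 + 15 + 12 + 9 + 14 = 53 and, as the 938th name in the sorted
# list, contributes 938 * 53 = 49714 to the total.
if __name__ == "__main__":
    assert sum(ord(letter) - 64 for letter in "COLIN") == 53
    assert 938 * 53 == 49714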
"""RoFormer model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
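

# Added illustration: build a default config and inspect the dynamic axes the
# ONNX export declares for each input tensor.
if __name__ == "__main__":
    config = RoFormerConfig()
    onnx_config = RoFormerOnnxConfig(config)
    print(config.vocab_size)         # 50000
    print(dict(onnx_config.inputs))  # input_ids, attention_mask, token_type_ids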
"""Topological sort of a directed acyclic graph (DAG) using depth-first search."""

edges: dict[str, list[str]] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices: list[str] = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list[str], sort: list[str]) -> list[str]:
    """Perform topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
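
# Added sanity check: for the module-level graph above, the DFS appends children
# before parents, so the computed order is ['c', 'd', 'e', 'b', 'a'].
if __name__ == "__main__":
    assert topological_sort("a", [], []) == ["c", "d", "e", "b", "a"]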
"""Convert original SwiftFormer checkpoints to the Hugging Face format."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    # We will verify our results on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
"""Unconditional audio generation pipeline in the style of Dance Diffusion."""
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
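

# Illustrative usage sketch (added; would normally live in user code, not in the
# pipeline module). "harmonai/maestro-150k" is one published Dance Diffusion
# checkpoint; any compatible unet/scheduler pair works.
if __name__ == "__main__":
    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
    result = pipe(num_inference_steps=100, audio_length_in_s=4.0)
    print(result.audios.shape)  # (batch_size, channels, num_samples)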
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
__lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
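# ---------------------------------------------------------------------------
# Hedged sketch (added): the import-structure / `_LazyModule` pattern above
# defers heavy imports until an attribute is first accessed. A minimal
# standalone illustration of the same idea using only the standard library
# (names below are illustrative, not the transformers API):
import importlib
import types


class LazyDemoModule(types.ModuleType):
    """Resolve attributes from submodules only on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # e.g. {"json": ["dumps"]} maps module name -> exported attributes
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache: __getattr__ only fires on misses
        return value


lazy = LazyDemoModule("lazy_demo", {"json": ["dumps"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'
# ---------------------------------------------------------------------------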
| 294
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowercase : str = logging.get_logger(__name__)
__lowercase : Any = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowercase : Union[str, Any] = {
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
__lowercase : List[str] = {'mobilebert-uncased': 5_12}
__lowercase : Dict = {}
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_INIT_CONFIGURATION
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = MobileBertTokenizer
def __init__( self , __a=None , __a=None , __a=True , __a="[UNK]" , __a="[SEP]" , __a="[PAD]" , __a="[CLS]" , __a="[MASK]" , __a=True , __a=None , **__a , ):
'''simple docstring'''
super().__init__(
__a , tokenizer_file=__a , do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , tokenize_chinese_chars=__a , strip_accents=__a , **__a , )
__a : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __a ) != do_lower_case
or normalizer_state.get('strip_accents' , __a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __a ) != tokenize_chinese_chars
):
__a : Optional[int] = getattr(__a , normalizer_state.pop('type' ) )
__a : List[str] = do_lower_case
__a : Union[str, Any] = strip_accents
__a : str = tokenize_chinese_chars
__a : Optional[Any] = normalizer_class(**__a )
__a : Union[str, Any] = do_lower_case
def __UpperCAmelCase ( self , __a , __a=None ):
'''simple docstring'''
__a : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self , __a , __a = None ):
'''simple docstring'''
__a : Any = [self.sep_token_id]
__a : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self , __a , __a = None ):
'''simple docstring'''
__a : Union[str, Any] = self._tokenizer.model.save(__a , name=__a )
return tuple(__a )
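# ---------------------------------------------------------------------------
# Hedged sketch (added): the two methods above implement the standard
# BERT-style pair layout "[CLS] A [SEP] B [SEP]" with segment ids 0/1. A
# dependency-free check (101/102 are the usual BERT [CLS]/[SEP] ids, used
# here only as example values):
CLS_ID, SEP_ID = 101, 102
seq_a, seq_b = [7, 8, 9], [4, 5]

pair_ids = [CLS_ID] + seq_a + [SEP_ID] + seq_b + [SEP_ID]
segment_ids = [0] * (len(seq_a) + 2) + [1] * (len(seq_b) + 1)

assert len(pair_ids) == len(segment_ids) == 8
# ---------------------------------------------------------------------------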
| 294
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = 'laion/clap-htsat-unfused'
__a : Optional[Any] = tempfile.mkdtemp()
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return RobertaTokenizer.from_pretrained(self.checkpoint , **__a )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = self.get_tokenizer()
__a : List[str] = self.get_feature_extractor()
__a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a )
processor.save_pretrained(self.tmpdirname )
__a : Tuple = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__a : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__a : List[str] = self.get_feature_extractor(do_normalize=__a , padding_value=1.0 )
__a : Tuple = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.get_feature_extractor()
__a : int = self.get_tokenizer()
__a : str = ClapProcessor(tokenizer=__a , feature_extractor=__a )
__a : int = floats_list((3, 1000) )
__a : str = feature_extractor(__a , return_tensors='np' )
__a : int = processor(audios=__a , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.get_feature_extractor()
__a : Any = self.get_tokenizer()
__a : Any = ClapProcessor(tokenizer=__a , feature_extractor=__a )
__a : Union[str, Any] = 'This is a test string'
__a : Union[str, Any] = processor(text=__a )
__a : Tuple = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.get_feature_extractor()
__a : str = self.get_tokenizer()
__a : List[str] = ClapProcessor(tokenizer=__a , feature_extractor=__a )
__a : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a : Optional[int] = processor.batch_decode(__a )
__a : Optional[Any] = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.get_feature_extractor()
__a : Optional[int] = self.get_tokenizer()
__a : int = ClapProcessor(tokenizer=__a , feature_extractor=__a )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 294
| 1
|
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class __UpperCamelCase :
def __init__( self , __a , __a , __a , __a , __a , __a=0.2 , __a=0.2 ):
'''simple docstring'''
__a : List[str] = bp_numa
__a : Union[str, Any] = bp_numa
__a : Union[str, Any] = bp_numa
__a : Optional[int] = conva_get[:2]
__a : List[str] = conva_get[2]
__a : str = size_pa
__a : Optional[Any] = rate_w
__a : Any = rate_t
__a : Dict = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
__a : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__a : List[str] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__a : Dict = -2 * np.random.rand(self.conva[1] ) + 1
__a : int = -2 * np.random.rand(self.num_bpa ) + 1
__a : Tuple = -2 * np.random.rand(self.num_bpa ) + 1
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : List[str] = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(__a , 'wb' ) as f:
pickle.dump(__a , __a )
print(f"""Model saved: {save_path}""" )
@classmethod
def __UpperCAmelCase ( cls , __a ):
'''simple docstring'''
with open(__a , 'rb' ) as f:
__a : List[Any] = pickle.load(__a ) # noqa: S301
__a : Union[str, Any] = model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
__a : List[Any] = model_dic.get('size_pooling1' )
__a : List[str] = model_dic.get('num_bp1' )
__a : List[Any] = model_dic.get('num_bp2' )
__a : Optional[int] = model_dic.get('num_bp3' )
__a : Tuple = model_dic.get('rate_weight' )
__a : Optional[int] = model_dic.get('rate_thre' )
# create model instance
__a : str = CNN(__a , __a , __a , __a , __a , __a , __a )
# modify model parameters
__a : List[Any] = model_dic.get('w_conv1' )
__a : int = model_dic.get('wkj' )
__a : List[Any] = model_dic.get('vji' )
__a : int = model_dic.get('thre_conv1' )
__a : Optional[int] = model_dic.get('thre_bp2' )
__a : Dict = model_dic.get('thre_bp3' )
return conv_ins
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x ))
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return round(__a , 3 )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : List[str] = convs[0]
__a : Dict = convs[1]
__a : Tuple = np.shape(__a )[0]
# get the data slice of original image data, data_focus
__a : Optional[Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , __a ):
for j_focus in range(0 , size_data - size_conv + 1 , __a ):
__a : Tuple = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__a )
# calculate the feature map of every single kernel, and save it as a list of matrices
__a : Dict = []
__a : str = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__a ):
__a : int = []
for i_focus in range(len(__a ) ):
__a : Optional[Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__a ) )
__a : Tuple = np.asmatrix(__a ).reshape(
__a , __a )
data_featuremap.append(__a )
# expand the data slice to one dimension
__a : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__a ) )
__a : Tuple = np.asarray(__a )
return focus_list, data_featuremap
def __UpperCAmelCase ( self , __a , __a , __a="average_pool" ):
'''simple docstring'''
__a : List[Any] = len(featuremaps[0] )
__a : Dict = int(size_map / size_pooling )
__a : Optional[int] = []
for i_map in range(len(__a ) ):
__a : List[str] = featuremaps[i_map]
__a : Dict = []
for i_focus in range(0 , __a , __a ):
for j_focus in range(0 , __a , __a ):
__a : Dict = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__a ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__a ) )
__a : Dict = np.asmatrix(__a ).reshape(__a , __a )
featuremap_pooled.append(__a )
return featuremap_pooled
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : List[Any] = []
for i in range(len(__a ) ):
__a : str = np.shape(data[i] )
__a : Optional[Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
__a : Optional[int] = data_listed.getA().tolist()[0]
data_expanded.extend(__a )
__a : Any = np.asarray(__a )
return data_expanded
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Union[str, Any] = np.asarray(__a )
__a : str = np.shape(__a )
__a : Dict = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : int = []
__a : int = 0
for i_map in range(__a ):
__a : Tuple = np.ones((size_map, size_map) )
for i in range(0 , __a , __a ):
for j in range(0 , __a , __a ):
__a : List[Any] = pd_pool[
i_pool
]
__a : Union[str, Any] = i_pool + 1
__a : str = np.multiply(
__a , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(__a )
return pd_all
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a=bool ):
'''simple docstring'''
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(__a )) )
print((' - - Shape: Teach_Data ', np.shape(__a )) )
__a : Union[str, Any] = 0
__a : Optional[int] = []
__a : List[str] = 1_0000
while rp < n_repeat and mse >= error_accuracy:
__a : Optional[int] = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(__a ) ):
# print('------------Learning Image: %d--------------'%p)
__a : Any = np.asmatrix(datas_train[p] )
__a : str = np.asarray(datas_teach[p] )
__a , __a : Optional[Any] = self.convolute(
__a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__a : Optional[int] = self.pooling(__a , self.size_poolinga )
__a : List[Any] = np.shape(__a )
__a : str = self._expand(__a )
__a : Optional[int] = data_bp_input
__a : Optional[Any] = np.dot(__a , self.vji.T ) - self.thre_bpa
__a : str = self.sig(__a )
__a : Any = np.dot(__a , self.wkj.T ) - self.thre_bpa
__a : Dict = self.sig(__a )
# --------------Model Learning ------------------------
# calculate error and gradient---------------
__a : str = np.multiply(
(data_teach - bp_outa) , np.multiply(__a , (1 - bp_outa) ) )
__a : Optional[int] = np.multiply(
np.dot(__a , self.wkj ) , np.multiply(__a , (1 - bp_outa) ) )
__a : Tuple = np.dot(__a , self.vji )
__a : Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
__a : Optional[Any] = pd_conva_pooled.T.getA().tolist()
__a : Any = self._calculate_gradient_from_pool(
__a , __a , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
__a : List[str] = self._expand_mat(pd_conva_all[k_conv] )
__a : Union[str, Any] = self.rate_weight * np.dot(__a , __a )
__a : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
__a : Union[str, Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# fully connected layer
__a : Any = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
__a : List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
__a : Union[str, Any] = self.thre_bpa - pd_k_all * self.rate_thre
__a : str = self.thre_bpa - pd_j_all * self.rate_thre
# accumulate the summed error over every single image
__a : Tuple = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
__a : int = rp + 1
__a : Tuple = error_count / patterns
all_mse.append(__a )
def draw_error():
__a : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__a , '+-' )
plt.plot(__a , 'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(__a , alpha=0.5 )
plt.show()
print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Any = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(__a )) )
for p in range(len(__a ) ):
__a : Optional[int] = np.asmatrix(datas_test[p] )
__a , __a : int = self.convolute(
__a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__a : Optional[int] = self.pooling(__a , self.size_poolinga )
__a : List[str] = self._expand(__a )
__a : Dict = data_bp_input
__a : int = bp_outa * self.vji.T - self.thre_bpa
__a : Any = self.sig(__a )
__a : List[Any] = bp_outa * self.wkj.T - self.thre_bpa
__a : Optional[int] = self.sig(__a )
produce_out.extend(bp_outa.getA().tolist() )
__a : Dict = [list(map(self.do_round , __a ) ) for each in produce_out]
return np.asarray(__a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Tuple = np.asmatrix(__a )
__a , __a : int = self.convolute(
__a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__a : Tuple = self.pooling(__a , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
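# ---------------------------------------------------------------------------
# Hedged sketch (added): the average pooling performed in the pooling method
# above, reproduced on a concrete 4x4 feature map with a 2x2 window via a
# plain numpy reshape trick:
import numpy as np

fm = np.arange(16, dtype=float).reshape(4, 4)
pooled = fm.reshape(2, 2, 2, 2).mean(axis=(1, 3))  # 2x2 average pooling
assert pooled.tolist() == [[2.5, 4.5], [10.5, 12.5]]
# ---------------------------------------------------------------------------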
| 294
|
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ):
'''simple docstring'''
__a : int = parent
__a : Union[str, Any] = batch_size
__a : Optional[int] = seq_length
__a : List[str] = is_training
__a : Any = use_input_mask
__a : Optional[int] = use_token_type_ids
__a : Any = use_labels
__a : List[str] = vocab_size
__a : str = hidden_size
__a : List[str] = num_hidden_layers
__a : str = num_attention_heads
__a : Optional[int] = intermediate_size
__a : Tuple = hidden_act
__a : Union[str, Any] = hidden_dropout_prob
__a : Dict = attention_probs_dropout_prob
__a : Optional[int] = max_position_embeddings
__a : Dict = type_vocab_size
__a : Any = type_sequence_label_size
__a : Dict = initializer_range
__a : Optional[Any] = num_labels
__a : Optional[Any] = num_choices
__a : Union[str, Any] = relative_attention
__a : List[str] = position_biased_input
__a : List[Any] = pos_att_type
__a : Tuple = scope
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : List[Any] = None
if self.use_input_mask:
__a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__a : Any = None
if self.use_token_type_ids:
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Optional[int] = None
__a : int = None
__a : Dict = None
if self.use_labels:
__a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__a : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Dict = DebertaVaModel(config=__a )
model.to(__a )
model.eval()
__a : Optional[int] = model(__a , attention_mask=__a , token_type_ids=__a )[0]
__a : str = model(__a , token_type_ids=__a )[0]
__a : Optional[int] = model(__a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : int = DebertaVaForMaskedLM(config=__a )
model.to(__a )
model.eval()
__a : List[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[Any] = self.num_labels
__a : List[Any] = DebertaVaForSequenceClassification(__a )
model.to(__a )
model.eval()
__a : Any = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__a )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Any = self.num_labels
__a : Dict = DebertaVaForTokenClassification(config=__a )
model.to(__a )
model.eval()
__a : str = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : List[str] = DebertaVaForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
__a : str = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = DebertaVaForMultipleChoice(config=__a )
model.to(__a )
model.eval()
__a : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : int = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.prepare_config_and_inputs()
( __a , __a , __a , __a , __a , __a , __a ) : Dict = config_and_inputs
__a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ = True
A_ = False
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = DebertaVaModelTester(self )
__a : List[str] = ConfigTester(self , config_class=__a , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : str = DebertaVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
__a : Optional[Any] = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
__a : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__a : int = model(__a , attention_mask=__a )[0]
# compare the actual values for a slice.
__a : str = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 294
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase : int = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Tuple = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
__lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
if is_torch_version('<' , '2.0.0' ) or not hasattr(_SCREAMING_SNAKE_CASE , '_dynamo' ):
return False
return isinstance(_SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : bool = True ):
__a : int = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__a : Any = is_compiled_module(_SCREAMING_SNAKE_CASE )
if is_compiled:
__a : List[Any] = model
__a : Union[str, Any] = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Union[str, Any] = model.module
if not keep_fpaa_wrapper:
__a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , 'forward' )
__a : str = model.__dict__.pop('_original_forward' , _SCREAMING_SNAKE_CASE )
if original_forward is not None:
while hasattr(_SCREAMING_SNAKE_CASE , '__wrapped__' ):
__a : Any = forward.__wrapped__
if forward == original_forward:
break
__a : str = forward
if getattr(_SCREAMING_SNAKE_CASE , '_converted_to_transformer_engine' , _SCREAMING_SNAKE_CASE ):
convert_model(_SCREAMING_SNAKE_CASE , to_transformer_engine=_SCREAMING_SNAKE_CASE )
if is_compiled:
__a : List[str] = model
__a : Optional[int] = compiled_model
return model
def lowerCamelCase ():
PartialState().wait_for_everyone()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif PartialState().local_process_index == 0:
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@contextmanager
def lowerCamelCase (**_SCREAMING_SNAKE_CASE : Tuple ):
for key, value in kwargs.items():
__a : Optional[int] = str(_SCREAMING_SNAKE_CASE )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ):
if not hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ) and not hasattr(_SCREAMING_SNAKE_CASE , '__name__' ):
__a : List[Any] = getattr(_SCREAMING_SNAKE_CASE , '__class__' , _SCREAMING_SNAKE_CASE )
if hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ):
return obj.__qualname__
if hasattr(_SCREAMING_SNAKE_CASE , '__name__' ):
return obj.__name__
return str(_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ):
for key, value in source.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : int = destination.setdefault(_SCREAMING_SNAKE_CASE , {} )
merge_dicts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__a : Tuple = value
return destination
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = None ):
if port is None:
__a : List[str] = 29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
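# ---------------------------------------------------------------------------
# Hedged sketch (added): the recursive dict merge defined above copies nested
# keys from `source` into `destination` in place. The same logic, standalone:
def merge_dicts_demo(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            merge_dicts_demo(value, destination.setdefault(key, {}))
        else:
            destination[key] = value
    return destination


assert merge_dicts_demo({"a": {"x": 1}}, {"a": {"y": 2}, "b": 3}) == {
    "a": {"x": 1, "y": 2},
    "b": 3,
}
# ---------------------------------------------------------------------------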
| 294
| 1
|
'''simple docstring'''
__lowercase : Optional[int] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__lowercase : Dict = [{'type': 'code', 'content': INSTALL_CONTENT}]
__lowercase : Union[str, Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 294
|
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
| 294
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowercase : List[Any] = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "ernie_m"
A_ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __a = 25_0002 , __a = 768 , __a = 12 , __a = 12 , __a = 3072 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 514 , __a = 0.02 , __a = 1 , __a = 1E-0_5 , __a=None , __a=False , __a=0.0 , **__a , ):
'''simple docstring'''
super().__init__(pad_token_id=__a , **__a )
__a : int = vocab_size
__a : Dict = hidden_size
__a : str = num_hidden_layers
__a : Dict = num_attention_heads
__a : List[str] = intermediate_size
__a : Union[str, Any] = hidden_act
__a : List[Any] = hidden_dropout_prob
__a : str = attention_probs_dropout_prob
__a : Any = max_position_embeddings
__a : int = initializer_range
__a : Dict = layer_norm_eps
__a : int = classifier_dropout
__a : Dict = is_decoder
__a : int = act_dropout
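# ---------------------------------------------------------------------------
# Hedged sketch (added): the attribute map above lets legacy kwarg names
# (e.g. "dropout") alias canonical config fields ("classifier_dropout"). The
# same aliasing mechanism, standalone (a simplification, not the actual
# PretrainedConfig implementation):
class AliasedConfig:
    attribute_map = {"dropout": "classifier_dropout"}

    def __setattr__(self, key, value):
        super().__setattr__(self.attribute_map.get(key, key), value)

    def __getattr__(self, key):  # only called when normal lookup fails
        mapped = type(self).attribute_map.get(key)
        if mapped is None:
            raise AttributeError(key)
        return getattr(self, mapped)


cfg = AliasedConfig()
cfg.dropout = 0.1
assert cfg.classifier_dropout == 0.1 and cfg.dropout == 0.1
# ---------------------------------------------------------------------------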
| 294
|
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class __UpperCamelCase :
A_ = 42
A_ = None
A_ = None
def lowerCamelCase (_SCREAMING_SNAKE_CASE : TreeNode | None ):
# Validation
def is_valid_tree(_SCREAMING_SNAKE_CASE : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'Each node should be type of TreeNode and data should be float.' )
def is_binary_search_tree_recursive_check(
_SCREAMING_SNAKE_CASE : TreeNode | None , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , _SCREAMING_SNAKE_CASE , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , _SCREAMING_SNAKE_CASE )
)
return is_binary_search_tree_recursive_check(_SCREAMING_SNAKE_CASE , -float('inf' ) , float('inf' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
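# ---------------------------------------------------------------------------
# Hedged sketch (added): the bounds-propagation invariant checked above,
# standalone. A tree is a BST iff every node's value lies strictly between
# the bounds inherited from its ancestors (tuples (data, left, right) stand
# in for the dataclass here):
def is_bst(node, lo=float("-inf"), hi=float("inf")):
    if node is None:
        return True
    data, left, right = node
    return lo < data < hi and is_bst(left, lo, data) and is_bst(right, data, hi)


assert is_bst((2.0, (1.0, None, None), (3.0, None, None)))
assert not is_bst((2.0, (3.0, None, None), None))  # left child exceeds parent
# ---------------------------------------------------------------------------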
| 294
| 1
|
'''simple docstring'''
from collections.abc import Callable
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Callable[[float], float] , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : float ):
__a : float = a
__a : float = b
if function(_SCREAMING_SNAKE_CASE ) == 0: # one of the a or b is a root for the function
return a
elif function(_SCREAMING_SNAKE_CASE ) == 0:
return b
elif (
function(_SCREAMING_SNAKE_CASE ) * function(_SCREAMING_SNAKE_CASE ) > 0
): # if neither endpoint is a root and f(a) and f(b) share the same sign,
# then bisection cannot bracket a root in this interval
raise ValueError('could not find root in given interval.' )
else:
__a : float = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # iterate until the bracketing interval is narrower than 10^-7
if function(_SCREAMING_SNAKE_CASE ) == 0:
return mid
elif function(_SCREAMING_SNAKE_CASE ) * function(_SCREAMING_SNAKE_CASE ) < 0:
__a : Any = mid
else:
__a : Dict = mid
__a : Optional[Any] = start + (end - start) / 2.0
return mid
def lowerCamelCase (_SCREAMING_SNAKE_CASE : float ):
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
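# ---------------------------------------------------------------------------
# Hedged check (added): for f(x) = x**3 - 2*x - 5, f(2) = -1 < 0 and
# f(3) = 16 > 0 bracket a sign change, so bisection over [1, 1000] converges
# to the real root near x = 2.0945514. A compact standalone version:
def bisect_demo(f, lo, hi, tol=1e-7):
    while hi - lo > tol:
        mid = (lo + hi) / 2.0
        if f(lo) * f(mid) <= 0:
            hi = mid  # sign change lies in [lo, mid]
        else:
            lo = mid
    return (lo + hi) / 2.0


assert abs(bisect_demo(lambda x: x**3 - 2 * x - 5, 1, 1000) - 2.0945514) < 1e-5
# ---------------------------------------------------------------------------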
| 294
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase : Dict = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
from transformers.testing_utils import pytest_terminal_summary_main
__a : Any = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(_SCREAMING_SNAKE_CASE , id=_SCREAMING_SNAKE_CASE )
| 294
| 1
|
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
while a != 0:
__a , __a : int = b % a, a
return b
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
if gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) != 1:
__a : List[str] = F"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(_SCREAMING_SNAKE_CASE )
__a , __a , __a : List[Any] = 1, 0, a
__a , __a , __a : List[str] = 0, 1, m
while va != 0:
__a : Optional[Any] = ua // va
__a , __a , __a , __a , __a , __a : Optional[int] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
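# ---------------------------------------------------------------------------
# Hedged check (added): a worked instance of the extended-Euclid inverse
# computed above. Since 3 * 4 = 12 = 1 (mod 11), the modular inverse of
# 3 mod 11 is 4; Python 3.8+ exposes the same result via three-argument pow:
assert (3 * 4) % 11 == 1
assert pow(3, -1, 11) == 4
# ---------------------------------------------------------------------------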
| 294
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__lowercase : Optional[Any] = True
except (ImportError, ModuleNotFoundError):
__lowercase : Dict = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
_SCREAMING_SNAKE_CASE = re.sub('<n>' , '' , _SCREAMING_SNAKE_CASE ) # re.sub returns a new string; assign it so the pegasus newline char is actually removed
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_SCREAMING_SNAKE_CASE ) )
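# ---------------------------------------------------------------------------
# Hedged usage sketch (added), assuming nltk and its 'punkt' data are
# available (they are downloaded above whenever nltk is importable):
#
#   text = "First sentence. Second sentence."
#   print(lowerCamelCase(text))
#   # First sentence.
#   # Second sentence.
# ---------------------------------------------------------------------------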
| 294
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = StableDiffusionInstructPixaPixPipeline
A_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
A_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__a : Tuple = PNDMScheduler(skip_prk_steps=__a )
torch.manual_seed(0 )
__a : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__a : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__a : List[Any] = CLIPTextModel(__a )
__a : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__a : Optional[int] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __UpperCAmelCase ( self , __a , __a=0 ):
'''simple docstring'''
__a : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
__a : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__a : str = Image.fromarray(np.uinta(__a ) ).convert('RGB' )
if str(__a ).startswith('mps' ):
__a : Union[str, Any] = torch.manual_seed(__a )
else:
__a : List[str] = torch.Generator(device=__a ).manual_seed(__a )
__a : Any = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__a : str = self.get_dummy_components()
__a : Any = StableDiffusionInstructPixaPixPipeline(**__a )
__a : Optional[Any] = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__a : List[str] = self.get_dummy_inputs(__a )
__a : Dict = sd_pipe(**__a ).images
__a : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
__a : Optional[int] = self.get_dummy_components()
__a : Dict = StableDiffusionInstructPixaPixPipeline(**__a )
__a : int = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__a : str = self.get_dummy_inputs(__a )
__a : Tuple = 'french fries'
__a : Optional[Any] = sd_pipe(**__a , negative_prompt=__a )
__a : Any = output.images
__a : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a : Optional[int] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
__a : Optional[Any] = self.get_dummy_components()
__a : List[Any] = StableDiffusionInstructPixaPixPipeline(**__a )
__a : Optional[Any] = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__a : str = self.get_dummy_inputs(__a )
__a : Union[str, Any] = [inputs['prompt']] * 2
__a : List[Any] = np.array(inputs['image'] ).astype(np.floataa ) / 255.0
__a : Dict = torch.from_numpy(__a ).unsqueeze(0 ).to(__a )
__a : List[str] = image / 2 + 0.5
__a : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
__a : Any = image.repeat(2 , 1 , 1 , 1 )
__a : str = sd_pipe(**__a ).images
__a : int = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__a : str = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__a : Optional[int] = self.get_dummy_components()
__a : Any = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' )
__a : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**__a )
__a : int = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__a : List[Any] = self.get_dummy_inputs(__a )
__a : str = sd_pipe(**__a ).images
__a : Optional[int] = image[0, -3:, -3:, -1]
__a : Optional[int] = [round(__a , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(__a ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
__a : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.get_dummy_components()
__a : Optional[int] = StableDiffusionInstructPixaPixPipeline(**__a )
__a : Optional[int] = VaeImageProcessor(do_resize=__a , do_normalize=__a )
__a : List[str] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__a : Dict = pipe(**self.get_dummy_inputs_by_type(__a , input_image_type='pt' ) )[0]
__a : Union[str, Any] = components['vae']
__a : Optional[int] = self.get_dummy_inputs_by_type(__a , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__a : List[Any] = vae.encode(inputs[image_param] ).latent_dist.mode()
__a : Optional[int] = pipe(**__a )[0]
__a : Union[str, Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(__a , 1E-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self , __a=0 ):
'''simple docstring'''
__a : str = torch.manual_seed(__a )
__a : Tuple = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
__a : Union[str, Any] = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__a : Optional[int] = self.get_inputs()
__a : List[Any] = pipe(**__a ).images
__a : List[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__a : int = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__a )
__a : List[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__a : Optional[Any] = self.get_inputs()
__a : Optional[int] = pipe(**__a ).images
__a : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__a : int = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__a )
__a : List[str] = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__a : str = self.get_inputs()
__a : int = pipe(**__a ).images
__a : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__a : Optional[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = 0
def callback_fn(__a , __a , __a ) -> None:
__a : str = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__a : int = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__a : Optional[int] = latents[0, -3:, -3:, -1]
__a : List[Any] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__a : Optional[int] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__a : int = latents[0, -3:, -3:, -1]
__a : Tuple = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__a : Any = False
__a : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__a , torch_dtype=torch.floataa )
__a : str = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__a : Dict = self.get_inputs()
pipe(**__a , callback=__a , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__a , torch_dtype=torch.floataa )
__a : Any = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__a : List[Any] = self.get_inputs()
__a : Dict = pipe(**__a )
__a : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__a : int = inputs['image'].resize((504, 504) )
__a : Dict = 'timbrooks/instruct-pix2pix'
__a : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__a , safety_checker=__a , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__a : Dict = pipe(**__a )
__a : int = output.images[0]
__a : Any = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
__a : Any = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 294
|
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using np.linspace()
__lowercase : int = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__lowercase : Any = [0, 25, 50]
__lowercase : int = [25, 50, 75]
__lowercase : List[str] = fuzz.membership.trimf(X, abca)
__lowercase : Any = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
__lowercase : List[Any] = np.ones(75)
__lowercase : Any = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__lowercase : int = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__lowercase : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = 1 - µA(x)
__lowercase : str = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
__lowercase : List[Any] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
__lowercase : Optional[Any] = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__lowercase : str = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
__lowercase : Optional[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
__lowercase : Union[str, Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
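# ---------------------------------------------------------------------------
# Hedged check (added): the core fuzzy operators above reduce to elementwise
# max / min / (1 - x) on membership grades; verified with plain numpy:
import numpy as np

a = np.array([0.2, 0.7, 1.0])
b = np.array([0.5, 0.4, 0.0])
assert np.allclose(np.maximum(a, b), [0.5, 0.7, 1.0])  # union = max
assert np.allclose(np.minimum(a, b), [0.2, 0.4, 0.0])  # intersection = min
assert np.allclose(1 - a, [0.8, 0.3, 0.0])             # complement = 1 - x
# ---------------------------------------------------------------------------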
| 294
| 1
|
'''simple docstring'''
from math import factorial
class __UpperCamelCase :
def __init__( self , __a , __a ):
'''simple docstring'''
__a : List[str] = real
if isinstance(__a , __a ):
__a : int = [1] * rank
else:
__a : Optional[int] = rank
def __repr__( self ):
'''simple docstring'''
return (
f"""{self.real}+"""
f"""{"+".join(str(__a )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , __a )
def __add__( self , __a ):
'''simple docstring'''
if not isinstance(__a , __a ):
return Dual(self.real + other , self.duals )
__a : Dict = self.duals.copy()
__a : List[Any] = other.duals.copy()
if len(__a ) > len(__a ):
o_dual.extend([1] * (len(__a ) - len(__a )) )
elif len(__a ) < len(__a ):
s_dual.extend([1] * (len(__a ) - len(__a )) )
__a : Union[str, Any] = []
for i in range(len(__a ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , __a )
A_ = __add__
def __sub__( self , __a ):
'''simple docstring'''
return self + other * -1
def __mul__( self , __a ):
'''simple docstring'''
if not isinstance(__a , __a ):
__a : Dict = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , __a )
__a : int = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , __a )
A_ = __mul__
def __truediv__( self , __a ):
'''simple docstring'''
if not isinstance(__a , __a ):
__a : List[Any] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , __a )
raise ValueError
def __floordiv__( self , __a ):
'''simple docstring'''
if not isinstance(__a , __a ):
__a : List[Any] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , __a )
raise ValueError
def __pow__( self , __a ):
'''simple docstring'''
if n < 0 or isinstance(__a , __a ):
raise ValueError('power must be a positive integer' )
if n == 0:
return 1
if n == 1:
return self
__a : List[str] = self
for _ in range(n - 1 ):
x *= self
return x
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
if not callable(_SCREAMING_SNAKE_CASE ):
raise ValueError('differentiate() requires a function as input for func' )
if not isinstance(_SCREAMING_SNAKE_CASE , (float, int) ):
raise ValueError('differentiate() requires a float as input for position' )
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('differentiate() requires an int as input for order' )
__a : str = Dual(_SCREAMING_SNAKE_CASE , 1 )
__a : Any = func(_SCREAMING_SNAKE_CASE )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(_SCREAMING_SNAKE_CASE )
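# A self-contained sketch of the same dual-number idea, independent of the
# (style-transformed) class above: carrying (value, first-derivative) pairs
# through arithmetic yields f'(x) from a single forward evaluation.
class TinyDual:
    def __init__(self, real, dual):
        self.real, self.dual = real, dual

    def __mul__(self, other):
        # product rule: (uv)' = u'v + uv'
        return TinyDual(
            self.real * other.real,
            self.real * other.dual + self.dual * other.real,
        )


def tiny_dual_demo():
    x = TinyDual(3.0, 1.0)  # seed dx/dx = 1
    y = x * x * x           # y = x**3
    return y.real, y.dual   # (27.0, 27.0), since d(x**3)/dx at x=3 is 27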
if __name__ == "__main__":
import doctest
doctest.testmod()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ):
return y**2 * y**4
print(differentiate(f, 9, 2))
| 294
|
'''simple docstring'''
import sys
__lowercase : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
__a : List[str] = 1
for digit in s:
product *= int(_SCREAMING_SNAKE_CASE )
return product
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = N ):
__a : Optional[int] = -sys.maxsize - 1
__a : Optional[Any] = n[:13]
__a : int = 13
while cur_index < len(_SCREAMING_SNAKE_CASE ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
__a : List[Any] = substr[1:] + n[cur_index]
cur_index += 1
else:
__a : Dict = max(_SCREAMING_SNAKE_CASE , str_eval(_SCREAMING_SNAKE_CASE ) )
__a : Optional[Any] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
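# A brute-force cross-check (a sketch) of the windowed scan above: score
# every 13-digit window directly with math.prod and take the maximum.
from math import prod


def brute_force_largest(digits: str, width: int = 13) -> int:
    return max(
        prod(int(ch) for ch in digits[i : i + width])
        for i in range(len(digits) - width + 1)
    )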
if __name__ == "__main__":
print(f'''{solution() = }''')
| 294
| 1
|
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple=1 ):
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int=0 ):
__a : str = []
for old_item in old_list:
__a : Union[str, Any] = old_item.replace('in_layers.0' , 'norm1' )
__a : str = new_item.replace('in_layers.2' , 'conv1' )
__a : Optional[Any] = new_item.replace('out_layers.0' , 'norm2' )
__a : Tuple = new_item.replace('out_layers.3' , 'conv2' )
__a : List[Any] = new_item.replace('emb_layers.1' , 'time_emb_proj' )
__a : int = new_item.replace('skip_connection' , 'conv_shortcut' )
__a : Tuple = shave_segments(_SCREAMING_SNAKE_CASE , n_shave_prefix_segments=_SCREAMING_SNAKE_CASE )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
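# e.g. an entry maps 'input_blocks.1.0.in_layers.0.weight' to
# 'input_blocks.1.0.norm1.weight' (with the default n_shave_prefix_segments=0,
# shave_segments leaves the prefix untouched).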
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict=0 ):
__a : List[str] = []
for old_item in old_list:
__a : Optional[Any] = old_item
__a : int = new_item.replace('norm.weight' , 'group_norm.weight' )
__a : int = new_item.replace('norm.bias' , 'group_norm.bias' )
__a : int = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
__a : str = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
__a : Dict = shave_segments(_SCREAMING_SNAKE_CASE , n_shave_prefix_segments=_SCREAMING_SNAKE_CASE )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : Optional[int]=None ):
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
__a : Dict = old_checkpoint[path]
__a : List[str] = old_tensor.shape[0] // 3
__a : str = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
__a : Tuple = old_tensor.shape[0] // config['num_head_channels'] // 3
__a : Optional[int] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
__a , __a , __a : str = old_tensor.split(channels // num_heads , dim=1 )
__a : Any = query.reshape(_SCREAMING_SNAKE_CASE )
__a : List[str] = key.reshape(_SCREAMING_SNAKE_CASE )
__a : List[Any] = value.reshape(_SCREAMING_SNAKE_CASE )
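# i.e. the fused qkv tensor is regrouped per attention head, cut into equal
# query/key/value chunks along dim 1, and each chunk is reshaped back to the
# flattened target layout before being written out below.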
for path in paths:
__a : int = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
__a : Tuple = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
__a : Dict = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
__a : Optional[int] = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
__a : List[Any] = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
__a : List[str] = old_checkpoint[path['old']][:, :, 0]
else:
__a : Optional[int] = old_checkpoint[path['old']]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any] ):
__a : int = {}
__a : Dict = checkpoint['time_embed.0.weight']
__a : Union[str, Any] = checkpoint['time_embed.0.bias']
__a : List[str] = checkpoint['time_embed.2.weight']
__a : Any = checkpoint['time_embed.2.bias']
__a : List[Any] = checkpoint['input_blocks.0.0.weight']
__a : Union[str, Any] = checkpoint['input_blocks.0.0.bias']
__a : Optional[int] = checkpoint['out.0.weight']
__a : List[str] = checkpoint['out.0.bias']
__a : Any = checkpoint['out.2.weight']
__a : str = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
__a : Optional[Any] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
__a : Optional[int] = {
layer_id: [key for key in checkpoint if F"""input_blocks.{layer_id}""" in key]
for layer_id in range(_SCREAMING_SNAKE_CASE )
}
# Retrieves the keys for the middle blocks only
__a : Tuple = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
__a : List[Any] = {
layer_id: [key for key in checkpoint if F"""middle_block.{layer_id}""" in key]
for layer_id in range(_SCREAMING_SNAKE_CASE )
}
# Retrieves the keys for the output blocks only
__a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
__a : Tuple = {
layer_id: [key for key in checkpoint if F"""output_blocks.{layer_id}""" in key]
for layer_id in range(_SCREAMING_SNAKE_CASE )
}
for i in range(1 , _SCREAMING_SNAKE_CASE ):
__a : List[Any] = (i - 1) // (config['num_res_blocks'] + 1)
__a : List[Any] = (i - 1) % (config['num_res_blocks'] + 1)
__a : Any = [key for key in input_blocks[i] if F"""input_blocks.{i}.0""" in key]
__a : str = [key for key in input_blocks[i] if F"""input_blocks.{i}.1""" in key]
if F"""input_blocks.{i}.0.op.weight""" in checkpoint:
__a : Optional[Any] = checkpoint[
F"""input_blocks.{i}.0.op.weight"""
]
__a : Any = checkpoint[
F"""input_blocks.{i}.0.op.bias"""
]
continue
__a : List[str] = renew_resnet_paths(_SCREAMING_SNAKE_CASE )
__a : List[str] = {'old': F"""input_blocks.{i}.0""", 'new': F"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
__a : List[Any] = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path, resnet_op] , config=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ):
__a : Optional[int] = renew_attention_paths(_SCREAMING_SNAKE_CASE )
__a : Optional[int] = {
'old': F"""input_blocks.{i}.1""",
'new': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
__a : Optional[int] = {
F"""input_blocks.{i}.1.qkv.bias""": {
'key': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""input_blocks.{i}.1.qkv.weight""": {
'key': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , attention_paths_to_split=_SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE , )
__a : Union[str, Any] = middle_blocks[0]
__a : List[Any] = middle_blocks[1]
__a : Tuple = middle_blocks[2]
__a : Any = renew_resnet_paths(_SCREAMING_SNAKE_CASE )
assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE )
__a : Dict = renew_resnet_paths(_SCREAMING_SNAKE_CASE )
assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE )
__a : List[Any] = renew_attention_paths(_SCREAMING_SNAKE_CASE )
__a : Any = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , attention_paths_to_split=_SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE ):
__a : Tuple = i // (config['num_res_blocks'] + 1)
__a : List[str] = i % (config['num_res_blocks'] + 1)
__a : Optional[Any] = [shave_segments(_SCREAMING_SNAKE_CASE , 2 ) for name in output_blocks[i]]
__a : Dict = {}
for layer in output_block_layers:
__a , __a : Optional[int] = layer.split('.' )[0], shave_segments(_SCREAMING_SNAKE_CASE , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(_SCREAMING_SNAKE_CASE )
else:
__a : str = [layer_name]
if len(_SCREAMING_SNAKE_CASE ) > 1:
__a : List[str] = [key for key in output_blocks[i] if F"""output_blocks.{i}.0""" in key]
__a : List[Any] = [key for key in output_blocks[i] if F"""output_blocks.{i}.1""" in key]
__a : str = renew_resnet_paths(_SCREAMING_SNAKE_CASE )
__a : Union[str, Any] = renew_resnet_paths(_SCREAMING_SNAKE_CASE )
__a : List[str] = {'old': F"""output_blocks.{i}.0""", 'new': F"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=_SCREAMING_SNAKE_CASE )
if ["conv.weight", "conv.bias"] in output_block_list.values():
__a : List[str] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
__a : Optional[int] = checkpoint[
F"""output_blocks.{i}.{index}.conv.weight"""
]
__a : Any = checkpoint[
F"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(_SCREAMING_SNAKE_CASE ) == 2:
__a : List[str] = []
if len(_SCREAMING_SNAKE_CASE ):
__a : List[str] = renew_attention_paths(_SCREAMING_SNAKE_CASE )
__a : Optional[Any] = {
'old': F"""output_blocks.{i}.1""",
'new': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
__a : List[Any] = {
F"""output_blocks.{i}.1.qkv.bias""": {
'key': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""output_blocks.{i}.1.qkv.weight""": {
'key': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=_SCREAMING_SNAKE_CASE , )
else:
__a : Any = renew_resnet_paths(_SCREAMING_SNAKE_CASE , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
__a : int = '.'.join(['output_blocks', str(_SCREAMING_SNAKE_CASE ), path['old']] )
__a : Dict = '.'.join(['up_blocks', str(_SCREAMING_SNAKE_CASE ), 'resnets', str(_SCREAMING_SNAKE_CASE ), path['new']] )
__a : Optional[int] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__lowercase : Union[str, Any] = parser.parse_args()
__lowercase : Dict = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__lowercase : Optional[int] = json.loads(f.read())
__lowercase : str = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__lowercase : Optional[int] = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__lowercase : List[str] = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
__lowercase : Optional[int] = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
__lowercase : int = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 294
|
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
A_ = CodeGenTokenizer
A_ = CodeGenTokenizerFast
A_ = True
A_ = {"add_prefix_space": True}
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__a : Tuple = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
__a : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) )
__a : Tuple = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__a : Dict = {'unk_token': '<unk>'}
__a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__a ) )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Tuple = 'lower newer'
__a : Tuple = 'lower newer'
return input_text, output_text
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__a : str = 'lower newer'
__a : Tuple = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
__a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
__a : List[str] = tokens + [tokenizer.unk_token]
__a : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__a : List[Any] = self.get_tokenizer()
__a : List[str] = self.get_rust_tokenizer(add_prefix_space=__a )
__a : Any = 'lower newer'
# Testing tokenization
__a : Dict = tokenizer.tokenize(__a , add_prefix_space=__a )
__a : Dict = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
__a : int = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__a : Tuple = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
__a : Tuple = self.get_rust_tokenizer(add_prefix_space=__a )
__a : Union[str, Any] = tokenizer.encode(__a , add_prefix_space=__a )
__a : int = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
__a : Any = tokens + [rust_tokenizer.unk_token]
__a : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def __UpperCAmelCase ( self , *__a , **__a ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self , __a=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a : Optional[int] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
__a : List[Any] = 'This is a simple input'
__a : Tuple = ['This is a simple input 1', 'This is a simple input 2']
__a : Tuple = ('This is a simple input', 'This is a pair')
__a : str = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
__a : str = 'This is a simple input'
__a : Any = ['This is a simple input looooooooong', 'This is a simple input']
__a : Optional[int] = ('This is a simple input', 'This is a pair')
__a : Optional[Any] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
__a : int = tokenizer.pad_token_id
__a : List[Any] = tokenizer(__a , padding='max_length' , max_length=30 , return_tensors='np' )
__a : Union[str, Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
__a : Optional[Any] = tokenizer(*__a , padding='max_length' , max_length=60 , return_tensors='np' )
__a : List[Any] = tokenizer(__a , padding=__a , truncate=__a , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = '$$$'
__a : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
__a : Union[str, Any] = 'This is a simple input'
__a : List[Any] = ['This is a simple input 1', 'This is a simple input 2']
__a : List[Any] = tokenizer.bos_token_id
__a : List[str] = tokenizer(__a )
__a : Optional[Any] = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__a : Any = tokenizer.decode(out_s.input_ids )
__a : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
__a : Optional[int] = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
__a : Tuple = '\nif len_a > len_b: result = a\nelse: result = b'
__a : Optional[int] = tokenizer.encode(__a )
__a : Union[str, Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
__a : Tuple = tokenizer.decode(__a , truncate_before_pattern=__a )
self.assertEqual(__a , __a )
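# i.e. decode(..., truncate_before_pattern=...) cuts the decoded string at the
# earliest match of any of the given regexes ('#', '<|endoftext|>', triple
# quotes, or a run of blank lines), which is how CodeGen completions are
# trimmed to a single code block.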
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
| 294
| 1
|
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(__a , 'num_attention_heads' ) )
class __UpperCamelCase :
def __init__( self , __a , __a=13 , __a=64 , __a=3 , __a=3 , __a=2 , __a=1 , __a=16 , __a=[128, 256, 384] , __a=[4, 6, 8] , __a=[2, 3, 4] , __a=[16, 16, 16] , __a=0 , __a=[2, 2, 2] , __a=[2, 2, 2] , __a=0.02 , __a=True , __a=True , __a=2 , ):
'''simple docstring'''
__a : List[Any] = parent
__a : Dict = batch_size
__a : List[Any] = image_size
__a : Dict = num_channels
__a : Optional[Any] = kernel_size
__a : str = stride
__a : Optional[int] = padding
__a : Union[str, Any] = hidden_sizes
__a : Any = num_attention_heads
__a : List[Any] = depths
__a : Optional[Any] = key_dim
__a : Optional[int] = drop_path_rate
__a : Union[str, Any] = patch_size
__a : Optional[Any] = attention_ratio
__a : Any = mlp_ratio
__a : Any = initializer_range
__a : Union[str, Any] = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
__a : Optional[Any] = is_training
__a : List[str] = use_labels
__a : Any = num_labels
__a : List[str] = initializer_range
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Dict = None
if self.use_labels:
__a : Any = ids_tensor([self.batch_size] , self.num_labels )
__a : List[str] = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : List[str] = LevitModel(config=__a )
model.to(__a )
model.eval()
__a : List[str] = model(__a )
__a : List[Any] = (self.image_size, self.image_size)
__a , __a : Optional[Any] = image_size[0], image_size[1]
for _ in range(4 ):
__a : str = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
__a : Optional[int] = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
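# (the loop above applies the standard convolution output-size formula,
# out = floor((in + 2 * padding - kernel) / stride) + 1, once per stride-2
# convolution in Levit's four-layer patch-embedding stem)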
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : Tuple = self.num_labels
__a : Any = LevitForImageClassification(__a )
model.to(__a )
model.eval()
__a : Union[str, Any] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.prepare_config_and_inputs()
__a , __a , __a : Any = config_and_inputs
__a : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
A_ = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = LevitModelTester(self )
__a : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not output attentions' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[Any] = model_class(__a )
__a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : int = [*signature.parameters.keys()]
__a : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__a , __a , __a ):
__a : Tuple = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__a : Optional[Any] = model(**self._prepare_for_class(__a , __a ) )
__a : List[Any] = outputs.hidden_states
__a : List[Any] = len(self.model_tester.depths ) + 1
self.assertEqual(len(__a ) , __a )
__a : Any = (self.model_tester.image_size, self.model_tester.image_size)
__a , __a : str = image_size[0], image_size[1]
for _ in range(4 ):
__a : int = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
__a : str = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
__a , __a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : str = True
check_hidden_states_output(__a , __a , __a )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self , __a , __a , __a=False ):
'''simple docstring'''
__a : Any = super()._prepare_for_class(__a , __a , return_labels=__a )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__a )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
__a : Dict = model_class(__a )
model.to(__a )
model.train()
__a : Any = self._prepare_for_class(__a , __a , return_labels=__a )
__a : Dict = model(**__a ).loss
loss.backward()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__a : Optional[Any] = False
__a : Tuple = True
for model_class in self.all_model_classes:
if model_class in get_values(__a ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
__a : str = model_class(__a )
model.gradient_checkpointing_enable()
model.to(__a )
model.train()
__a : Optional[Any] = self._prepare_for_class(__a , __a , return_labels=__a )
__a : Optional[int] = model(**__a ).loss
loss.backward()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Dict = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__a ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type["title"]}""" ):
__a : Optional[int] = problem_type['title']
__a : int = problem_type['num_labels']
__a : List[str] = model_class(__a )
model.to(__a )
model.train()
__a : List[Any] = self._prepare_for_class(__a , __a , return_labels=__a )
if problem_type["num_labels"] > 1:
__a : Tuple = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
__a : Union[str, Any] = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__a ) as warning_list:
__a : Optional[int] = model(**__a ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Optional[Any] = LevitModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowerCamelCase ():
__a : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__a )
__a : Dict = self.default_image_processor
__a : int = prepare_img()
__a : Dict = image_processor(images=__a , return_tensors='pt' ).to(__a )
# forward pass
with torch.no_grad():
__a : Tuple = model(**__a )
# verify the logits
__a : List[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
__a : Dict = torch.tensor([1.0448, -0.3745, -1.8317] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
| 294
|
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('iterations must be defined as integers' )
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not number >= 1:
raise ValueError(
'starting number must be an integer and be more than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
__a : Dict = ''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_SCREAMING_SNAKE_CASE )
# print(out)
number += 1
out += " "
return out
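# Example (a sketch, using the function above): the first fifteen numbers give
# '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '
# (note the trailing space appended after every entry).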
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294
| 1
|
'''simple docstring'''
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str]=28_123 ):
__a : Union[str, Any] = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
__a : List[Any] = set()
__a : str = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(_SCREAMING_SNAKE_CASE )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
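# Context (Project Euler 23): a number is abundant when its proper divisors
# sum to more than the number itself (12 is the smallest: 1+2+3+4+6 = 16),
# and every integer above 28123 is known to be expressible as a sum of two
# abundant numbers, hence the default limit. The function returns the sum of
# all positive integers that are not such a sum.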
if __name__ == "__main__":
print(solution())
| 294
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=400 , __a=True , __a=None , __a=True , ):
'''simple docstring'''
__a : List[Any] = size if size is not None else {'height': 18, 'width': 18}
__a : int = parent
__a : Dict = batch_size
__a : Optional[int] = num_channels
__a : List[Any] = image_size
__a : Tuple = min_resolution
__a : str = max_resolution
__a : str = do_resize
__a : Optional[Any] = size
__a : str = apply_ocr
def __UpperCAmelCase ( self ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
A_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = LayoutLMvaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , 'do_resize' ) )
self.assertTrue(hasattr(__a , 'size' ) )
self.assertTrue(hasattr(__a , 'apply_ocr' ) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__a : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
__a : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , __a )
self.assertIsInstance(encoding.boxes , __a )
# Test batched
__a : Any = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__a : Tuple = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
__a : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__a : List[str] = image_processing(__a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = LayoutLMvaImageProcessor()
from datasets import load_dataset
__a : str = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__a : Tuple = Image.open(ds[0]['file'] ).convert('RGB' )
__a : Optional[Any] = image_processing(__a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__a : Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__a : Union[str, Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __a )
self.assertListEqual(encoding.boxes , __a )
# with apply_OCR = False
__a : List[Any] = LayoutLMvaImageProcessor(apply_ocr=__a )
__a : List[Any] = image_processing(__a , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 294
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
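# e.g. with 2 processes: rank 0 builds tensor([1., 2.]) and rank 1 builds
# tensor([3., 4.]), so gather() below yields [1, 2, 3, 4] across the group.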
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ):
__a : List[str] = create_tensor(_SCREAMING_SNAKE_CASE )
__a : List[str] = gather(_SCREAMING_SNAKE_CASE )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
__a : str = [state.process_index]
__a : int = gather_object(_SCREAMING_SNAKE_CASE )
assert len(_SCREAMING_SNAKE_CASE ) == state.num_processes, F"""{gathered_obj}, {len(_SCREAMING_SNAKE_CASE )} != {state.num_processes}"""
assert gathered_obj == list(range(state.num_processes ) ), F"""{gathered_obj} != {list(range(state.num_processes ) )}"""
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
__a : int = create_tensor(_SCREAMING_SNAKE_CASE )
__a : str = broadcast(_SCREAMING_SNAKE_CASE )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
# Give the main process one extra element so that pad_across_processes below
# actually has to pad the other ranks' tensors
if state.is_main_process:
__a : str = torch.arange(state.num_processes + 1 ).to(state.device )
else:
__a : Optional[Any] = torch.arange(state.num_processes ).to(state.device )
__a : int = pad_across_processes(_SCREAMING_SNAKE_CASE )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ):
# For now runs on only two processes
if state.num_processes != 2:
return
__a : str = create_tensor(_SCREAMING_SNAKE_CASE )
__a : Optional[Any] = reduce(_SCREAMING_SNAKE_CASE , 'sum' )
__a : List[str] = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), F"""{reduced_tensor} != {truth_tensor}"""
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ):
# For now runs on only two processes
if state.num_processes != 2:
return
__a : int = create_tensor(_SCREAMING_SNAKE_CASE )
__a : List[Any] = reduce(_SCREAMING_SNAKE_CASE , 'mean' )
__a : List[Any] = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), F"""{reduced_tensor} != {truth_tensor}"""
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ):
# For xla_spawn (TPUs)
main()
def lowerCamelCase ():
__a : Dict = PartialState()
state.print(F"""State: {state}""" )
state.print('testing gather' )
test_gather(_SCREAMING_SNAKE_CASE )
state.print('testing gather_object' )
test_gather_object(_SCREAMING_SNAKE_CASE )
state.print('testing broadcast' )
test_broadcast(_SCREAMING_SNAKE_CASE )
state.print('testing pad_across_processes' )
test_pad_across_processes(_SCREAMING_SNAKE_CASE )
state.print('testing reduce_sum' )
test_reduce_sum(_SCREAMING_SNAKE_CASE )
state.print('testing reduce_mean' )
test_reduce_mean(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 294
|
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowercase : List[Any] = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "ernie_m"
A_ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __a = 25_0002 , __a = 768 , __a = 12 , __a = 12 , __a = 3072 , __a = "gelu" , __a = 0.1 , __a = 0.1 , __a = 514 , __a = 0.02 , __a = 1 , __a = 1E-0_5 , __a=None , __a=False , __a=0.0 , **__a , ):
'''simple docstring'''
super().__init__(pad_token_id=__a , **__a )
__a : int = vocab_size
__a : Dict = hidden_size
__a : str = num_hidden_layers
__a : Dict = num_attention_heads
__a : List[str] = intermediate_size
__a : Union[str, Any] = hidden_act
__a : List[Any] = hidden_dropout_prob
__a : str = attention_probs_dropout_prob
__a : Any = max_position_embeddings
__a : int = initializer_range
__a : Dict = layer_norm_eps
__a : int = classifier_dropout
__a : Dict = is_decoder
__a : int = act_dropout
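# Minimal usage sketch (under the class's original name, ErnieMConfig; the
# keyword values shown are just the defaults from the signature above):
#
#     config = ErnieMConfig(vocab_size=250002, hidden_size=768, num_hidden_layers=12)
#     assert config.model_type == "ernie_m"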
| 294
| 1
|
'''simple docstring'''
from __future__ import annotations
__lowercase : int = list[list[int]]
# assigning initial values to the grid
__lowercase : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__lowercase : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Matrix , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ):
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Matrix ):
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Matrix ):
if location := find_empty_location(_SCREAMING_SNAKE_CASE ):
__a , __a : str = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Optional[int] = digit
if sudoku(_SCREAMING_SNAKE_CASE ) is not None:
return grid
__a : Tuple = 0
return None
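# The solver above is plain depth-first backtracking: take the first empty
# cell, try each digit 1-9 that passes is_safe(), recurse, and reset the
# cell to 0 on failure so the caller can try the next digit.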
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Matrix ):
for row in grid:
for cell in row:
print(_SCREAMING_SNAKE_CASE , end=' ' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
__lowercase : List[str] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 294
|
"""Integration tests for 4-bit quantization with bitsandbytes."""
import gc
import importlib.metadata
import tempfile
import unittest

from packaging import version

from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
    is_torch_available,
    require_accelerate,
    require_bitsandbytes,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)


def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h


if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)


class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit
        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()
        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        bnb_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")
        # Check this does not throw an error
        _ = self.model_fp16.half()
        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)


class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)


class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)


@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)


class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 294
| 1
|
"""Shared tests for serializing and reloading feature extractors."""
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())

        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 294
|
"""An XLM-RoBERTa encoder with a linear projection head on top of the hidden states."""
from dataclasses import dataclass
from typing import Optional, Tuple

import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput


@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            # project the second-to-last hidden state after a dedicated LayerNorm
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
        else:
            projection_state = self.transformation(outputs.last_hidden_state)

        return TransformationModelOutput(
            projection_state=projection_state,
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 294
| 1
|
"""SEW-D model configuration."""
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 294
|
"""Lazy import structure for the RoCBert model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass  # no fast tokenizer is exported for RoCBert

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass  # no fast tokenizer is exported for RoCBert

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 294
| 1
|