"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
  title={SQuAD: 100,000+ Questions for Machine Comprehension of Text},
  author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
  booktitle={EMNLP},
  year={2016}
}
"""

_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).

Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""

_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': the text of the answer
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the SQuAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly matches the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
Examples:

    >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
    >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
    >>> squad_metric = datasets.load_metric("squad")
    >>> results = squad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
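# Usage sketch (added; not part of the original scoring module). It assumes the
# `datasets` package is installed and that this file is exposed as the "squad"
# metric, as in the docstring example above:
#
#     squad_metric = datasets.load_metric("squad")
#     predictions = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
#     references = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
#     print(squad_metric.compute(predictions=predictions, references=references))
#     # {'exact_match': 100.0, 'f1': 100.0}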
def to_uppercase_variants(txt: str) -> list:
    """
    Return every variant of `txt` in which exactly one alphabetic character
    has been uppercased.

    >>> to_uppercase_variants("ab")
    ['Ab', 'aB']
    >>> to_uppercase_variants("a1b")
    ['A1b', 'a1B']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
        >>> from diffusers.utils import load_image
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior.to("cuda")

        >>> prompt = "A red cartoon frog, 4k"
        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)

        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        ... )
        >>> pipe.to("cuda")

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/frog.png"
        ... )

        >>> image = pipe(
        ...     image=init_image,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ...     strength=0.2,
        ... ).images

        >>> image[0].save("red_frog.png")
        ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
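# Worked example (added): with the default scale_factor=8 each returned
# dimension is ceil(dim / 64) * 8, e.g. downscale_height_and_width(768, 768)
# == (96, 96) and downscale_height_and_width(700, 700) == (88, 88)
# (700 // 64 == 10 with a remainder, so (10 + 1) * 8 == 88).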
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
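# Note (added): `prepare_image` returns a float tensor of shape (1, 3, h, w)
# rescaled from [0, 255] to [-1.0, 1.0], the usual input range for the VQ
# encoder used below.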
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for image-to-image generation using Kandinsky 2.2.

    Args:
        scheduler ([`DDPMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents.
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding.
        movq ([`VQModel`]): MoVQ decoder to generate the image from the latents.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
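    # Note (added): `strength` selects how much of the diffusion schedule is
    # re-run for img2img. With num_inference_steps=100 and strength=0.3 only the
    # final 30 timesteps are kept, so the input image is noised to that point
    # and then denoised.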
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    r"""
    Constructs a Whisper processor which wraps a Whisper feature extractor and a Whisper tokenizer into a single
    processor.
    """

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
snake_case_ = 4
snake_case_ = 2
snake_case_ = generate_all_combinations(n, k)
print_all_state(total_list)
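# Expected output for n=4, k=2 (added for reference):
# 1 2
# 1 3
# 1 4
# 2 3
# 2 4
# 3 4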
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """
    Custom SentencePiece Unigram tokenizer, with NMT, NFKC, whitespace and lowercasing
    normalization and Metaspace/Digits/Punctuation pre-tokenization.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 370 | """simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single line, only passing `add_prefix_space` for BART-like tokenizers."""
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
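# Worked example (added): with pad_token_id=0 and
#     input_ids = tensor([[1, 2, 0],
#                         [3, 0, 0]])
# only columns containing at least one non-pad token survive, so trim_batch
# returns tensor([[1, 2], [3, 0]]) -- the all-pad third column is dropped.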
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))
def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
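# Worked example (added): normalize_answer("The Cat!") == "cat" -- lowercasing,
# punctuation stripping, article removal and whitespace collapsing are applied
# in that order.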
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
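# Worked example (added): f1_score("new york city", "york city") has 2 shared
# tokens, precision 2/3 and recall 2/2, so F1 = 2 * (2/3 * 1) / (2/3 + 1) ≈ 0.8.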
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Find the maximum-sum contiguous subarray of arr[low..high] by divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)
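# Note (added): the cross-sum scan above is Theta(n) at each level of the
# recursion, so max_subarray satisfies T(n) = 2*T(n/2) + Theta(n), which gives
# the classic Theta(n log n) divide-and-conquer running time.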
def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start
def plot_runtimes() -> None:
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row
def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle exploiting the symmetry of each row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result
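# Sanity check (added): both generators agree on small inputs, e.g.
# generate_pascal_triangle(4) == generate_pascal_triangle_optimized(4)
#                             == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]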
def benchmark() -> None:
    """Benchmark the two triangle generators against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs_list = [input_ids, input_mask]
        result = model(inputs_list)
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
    import tensorflow as tf

    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            use_xla=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()

            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()
def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hide():
    """Context manager to hide the terminal cursor while the body runs."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
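# Usage sketch (added): `hide` keeps the cursor hidden for the duration of a
# block and restores it even if the body raises.
#
#     with hide():
#         run_long_task()  # hypothetical long-running call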
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    r"""
    Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
def __snake_case (self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> int:
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
def __snake_case (self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
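# Editor's usage sketch (added, not part of the original module): pairing text
# inputs with spectrogram targets, as wired up by __call__ above. Assumes the
# public "microsoft/speecht5_tts" checkpoint; `waveform` is a hypothetical
# 16 kHz audio array.
# from transformers import SpeechT5Processor
# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
# batch = processor(text="hello world", audio_target=waveform, sampling_rate=16_000, return_tensors="pt")
# batch["labels"] and batch["decoder_attention_mask"] are filled in by __call__.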
| 362 |
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in parallel."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in series."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
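    # Editor's usage sketch (added): equal resistors make the result easy to
    # check by hand. Parallel: 1 / (1/2 + 1/4 + 1/4) = 1.0 ohm; series: 2 + 4 + 4 = 10.0 ohms.
    print(resistor_parallel([2, 4, 4]))  # 1.0
    print(resistor_series([2, 4, 4]))  # 10.0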
| 82 | 0 |
"""CANINE model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
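

# Editor's usage sketch (added, not part of the original module): the same
# config can be built through the public API with one override.
# from transformers import CanineConfig
# config = CanineConfig(num_hash_buckets=8_192)
# print(config.model_type, config.hidden_size, config.num_hash_buckets)  # canine 768 8192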
| 41 |
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 129 | 0 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
_lowerCAmelCase : Tuple = HfArgumentParser(InitializationArguments)
_lowerCAmelCase : Optional[Any] = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
_lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
_lowerCAmelCase : Union[str, Any] = {
'''vocab_size''': len(tokenizer),
'''scale_attn_by_inverse_layer_idx''': True,
'''reorder_and_upcast_attn''': True,
}
# Load model config (GPT-2 large in this case)
_lowerCAmelCase : Any = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
_lowerCAmelCase : Any = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub) | 340 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class A_ ( _a ):
lowerCAmelCase__ = 'camembert'
def __init__( self: Tuple ,__lowerCAmelCase: Union[str, Any]=30_522 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Union[str, Any]=12 ,__lowerCAmelCase: int=12 ,__lowerCAmelCase: Optional[int]=3_072 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: int=512 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Any=1 ,__lowerCAmelCase: Any=0 ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Any="absolute" ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Tuple=None ,**__lowerCAmelCase: Dict ,):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : List[str] = vocab_size
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : List[str] = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : Union[str, Any] = layer_norm_eps
_lowerCamelCase : Tuple = position_embedding_type
_lowerCamelCase : List[Any] = use_cache
_lowerCamelCase : Dict = classifier_dropout
class A_ ( _a ):
@property
def _lowercase ( self: Any ):
'''simple docstring'''
if self.task == "multiple-choice":
_lowerCamelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
_lowerCamelCase : int = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] ) | 340 | 1 |
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
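    # Editor's usage sketch (added): a small strictly diagonally dominant system,
    # 4x + y = 1 and x + 3y = 2, whose exact solution is x = 1/11, y = 7/11.
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[1.0], [2.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.5, 0.5], iterations=25))
    # -> approximately [0.0909, 0.6364]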
| 49 |
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__snake_case = logging.get_logger(__name__)
def __lowerCAmelCase ( ) -> str:
"""simple docstring"""
snake_case : Dict = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
snake_case : Optional[int] = json.loads(lowercase )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
snake_case : Optional[int] = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
snake_case : Any = json.loads(lowercase )
if not mpi_options.get("sagemaker_mpi_enabled" , lowercase ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : str = field(
default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , UpperCamelCase__ , )
@cached_property
def lowerCamelCase ( self ) -> "torch.device":
'''simple docstring'''
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
snake_case : Optional[Any] = torch.device("cpu" )
snake_case : List[Any] = 0
elif is_sagemaker_model_parallel_available():
snake_case : Tuple = smp.local_rank()
snake_case : int = torch.device("cuda" , UpperCamelCase__ )
snake_case : Dict = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
snake_case : Any = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
snake_case : Optional[Any] = torch.device("cuda" , self.local_rank )
snake_case : str = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
snake_case : List[str] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
snake_case : Optional[Any] = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
snake_case : Any = torch.device("cuda" , self.local_rank )
snake_case : Dict = 1
if device.type == "cuda":
torch.cuda.set_device(UpperCamelCase__ )
return device
@property
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
return not is_sagemaker_model_parallel_available()
@property
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
return False
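

# Editor's usage sketch (added, not part of the original module): the detection
# helper above is driven purely by environment variables, so it can be probed
# locally without a SageMaker job:
# os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'
# os.environ["SM_FRAMEWORK_PARAMS"] = '{"sagemaker_mpi_enabled": true}'
# print(is_sagemaker_model_parallel_available())  # True only if `smdistributed` is importable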
| 203 | 0 |
"""simple docstring"""
from manim import *
class lowerCamelCase_ ( __lowerCAmelCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Union[str, Any] = Rectangle(height=0.5 , width=0.5)
__UpperCamelCase :int = Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
__UpperCamelCase :Dict = [mem.copy() for i in range(6)]
__UpperCamelCase :Dict = [mem.copy() for i in range(6)]
__UpperCamelCase :str = VGroup(*lowerCamelCase__).arrange(lowerCamelCase__ , buff=0)
__UpperCamelCase :Optional[Any] = VGroup(*lowerCamelCase__).arrange(lowerCamelCase__ , buff=0)
__UpperCamelCase :Optional[Any] = VGroup(lowerCamelCase__ , lowerCamelCase__).arrange(lowerCamelCase__ , buff=0)
__UpperCamelCase :Optional[Any] = Text('''CPU''' , font_size=24)
__UpperCamelCase :Optional[int] = Group(lowerCamelCase__ , lowerCamelCase__).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowerCamelCase__)
__UpperCamelCase :List[Any] = [mem.copy() for i in range(1)]
__UpperCamelCase :Tuple = VGroup(*lowerCamelCase__).arrange(lowerCamelCase__ , buff=0)
__UpperCamelCase :Optional[Any] = Text('''GPU''' , font_size=24)
__UpperCamelCase :Tuple = Group(lowerCamelCase__ , lowerCamelCase__).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__)
gpu.align_to(lowerCamelCase__ , lowerCamelCase__)
gpu.set_x(gpu.get_x() - 1)
self.add(lowerCamelCase__)
__UpperCamelCase :int = [mem.copy() for i in range(6)]
__UpperCamelCase :Optional[Any] = VGroup(*lowerCamelCase__).arrange(lowerCamelCase__ , buff=0)
__UpperCamelCase :Tuple = Text('''Model''' , font_size=24)
__UpperCamelCase :List[Any] = Group(lowerCamelCase__ , lowerCamelCase__).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__)
model.move_to([3, -1.0, 0])
self.play(
Create(lowerCamelCase__ , run_time=1) , Create(lowerCamelCase__ , run_time=1) , Create(lowerCamelCase__ , run_time=1) , )
__UpperCamelCase :Tuple = MarkupText(
f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
__UpperCamelCase :Dict = Square(side_length=2.2)
key.move_to([-5, 2, 0])
__UpperCamelCase :Optional[int] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
step_a.move_to([2, 2, 0])
self.play(Write(lowerCamelCase__ , run_time=2.5) , Write(lowerCamelCase__) , Write(lowerCamelCase__))
self.add(lowerCamelCase__)
__UpperCamelCase :Optional[Any] = []
__UpperCamelCase :Any = []
__UpperCamelCase :Optional[Any] = []
for i, rect in enumerate(lowerCamelCase__):
__UpperCamelCase :Optional[Any] = Rectangle(height=0.46 , width=0.46).set_stroke(width=0.0).set_fill(lowerCamelCase__ , opacity=0.7)
cpu_target.move_to(lowerCamelCase__)
cpu_target.generate_target()
__UpperCamelCase :Any = 0.46 / 4
__UpperCamelCase :Union[str, Any] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowerCamelCase__)
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCamelCase__ , buff=0.0)
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCamelCase__ , buff=0.0)
cpu_targs.append(lowerCamelCase__)
first_animations.append(rect.animate(run_time=0.5).set_stroke(lowerCamelCase__))
second_animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5))
self.play(*lowerCamelCase__)
self.play(*lowerCamelCase__)
self.wait()
| 357 |
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
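

# Editor's usage sketch (added, not part of the original module): the helpers
# above implement the layouts <cls> A <sep> and <cls> A <sep> B <sep>, so for a
# pair of sequences of lengths 2 and 1 the token type ids are four 0s and two 1s.
# tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
# tokenizer.create_token_type_ids_from_sequences([5, 6], [7])  # -> [0, 0, 0, 0, 1, 1]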
| 105 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def __lowercase ( _a = 2_000_000 ):
snake_case_ : list[int] = [0]
snake_case_ : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
snake_case_ : int = 0
# the area corresponding to the grid that gives the product closest to target
snake_case_ : int = 0
# an estimate of b, using the quadratic formula
snake_case_ : float
# the largest integer less than b_estimate
snake_case_ : int
# the largest integer less than b_estimate
snake_case_ : int
# the triangle number corresponding to b_floor
snake_case_ : int
# the triangle number corresponding to b_ceil
snake_case_ : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
snake_case_ : Union[str, Any] = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
snake_case_ : Tuple = floor(_a )
snake_case_ : List[str] = ceil(_a )
snake_case_ : Union[str, Any] = triangle_numbers[b_floor]
snake_case_ : Any = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
snake_case_ : List[Any] = triangle_b_first_guess * triangle_a
snake_case_ : Optional[int] = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
snake_case_ : Tuple = triangle_b_second_guess * triangle_a
snake_case_ : Dict = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f'{solution() = }')
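    # Editor's sanity check (added): a 2 x 3 grid contains T(2) * T(3) = 3 * 6 = 18
    # rectangles, so a target of 18 is matched exactly by the area 2 * 3 = 6.
    assert solution(18) == 6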
| 264 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 264 | 1 |
""" Testing suite for the PyTorch BEiT model. """


import inspect
import unittest

from datasets import load_dataset
from packaging import version

from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_MAPPING,
        BeitForImageClassification,
        BeitForMaskedImageModeling,
        BeitForSemanticSegmentation,
        BeitModel,
    )
    from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    import PIL
    from PIL import Image

    from transformers import BeitImageProcessor


class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
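

# Editor's usage sketch (added, not part of the original test file): the same
# checkpoint exercised above can also be driven through the pipeline API.
# from transformers import pipeline
# classifier = pipeline("image-classification", model="microsoft/beit-base-patch16-224")
# print(classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")[0]["label"])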
| 351 |
"""Tokenization classes for DPR."""

import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(lowerCamelCase__ )
class __UpperCamelCase :
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
elif titles is None or texts is None:
lowerCamelCase_ =titles if texts is None else texts
return super().__call__(
lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
lowerCamelCase_ =titles if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [titles]
lowerCamelCase_ =texts if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [texts]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =questions if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [questions] * n_passages
assert len(lowerCAmelCase ) == len(
lowerCAmelCase ), f'''There should be as many titles than texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.'''
lowerCamelCase_ =super().__call__(lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ =super().__call__(lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ ={
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase, lowerCAmelCase )
]
}
if return_attention_mask is not False:
lowerCamelCase_ =[]
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCamelCase_ =attention_mask
return self.pad(lowerCAmelCase, padding=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 16, lowerCAmelCase = 64, lowerCAmelCase = 4, ):
"""simple docstring"""
lowerCamelCase_ =reader_input['''input_ids''']
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =reader_output[:3]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =sorted(range(lowerCAmelCase ), reverse=lowerCAmelCase, key=relevance_logits.__getitem__ )
lowerCamelCase_ =[]
for doc_id in sorted_docs:
lowerCamelCase_ =list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCamelCase_ =sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCamelCase_ =sequence_ids.index(self.pad_token_id )
else:
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCAmelCase, top_spans=lowerCAmelCase, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCAmelCase, start_index=lowerCAmelCase, end_index=lowerCAmelCase, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) )
if len(lowerCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =[]
for start_index, start_score in enumerate(lowerCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        lowerCamelCase_ =sorted(lowerCAmelCase, key=lambda x : x[1], reverse=lowerCAmelCase )
lowerCamelCase_ =[]
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
lowerCamelCase_ =end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCAmelCase ) == top_spans:
break
return chosen_span_intervals
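# Note on the selection rule above: candidate spans are ranked by start_logit +
# end_logit, and a candidate is skipped whenever it contains, or is contained in,
# a span that was already chosen, so the returned top spans never overlap.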
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
lowercase : int =VOCAB_FILES_NAMES
lowercase : Tuple =READER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Tuple =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] =READER_PRETRAINED_INIT_CONFIGURATION
lowercase : int =['input_ids', 'attention_mask']
lowercase : Dict =DPRReaderTokenizer
| 6 | 0 |
lowerCAmelCase__ : float = 9.8_06_65  # standard gravity in m/s^2


def UpperCamelCase__ ( fluid_density , volume , gravity = lowerCAmelCase__ ) -> float:
    if fluid_density <= 0:
        raise ValueError('Impossible fluid density' )
    if volume < 0:
        raise ValueError('Impossible object volume' )
    if gravity <= 0:
        raise ValueError('Impossible gravity' )
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
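    # Illustrative call (values assumed, not part of the original script): fresh water
    # at ~1000 kg/m^3 displacing 0.5 m^3 under standard gravity gives a buoyant force
    # of 1000 * 9.80665 * 0.5 = 4903.325 N.
    print(UpperCamelCase__(1000, 0.5))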
| 143 | import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
A : Tuple = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
A : Dict = "main"
# Default branch name
A : List[str] = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
A : Tuple = "aaaaaaa"
# This commit does not exist, so we should 404.
A : int = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
A : Tuple = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def a__ ( ):
print("Welcome!" )
yield
print("Bye!" )
@contextlib.contextmanager
def a__ ( ):
print("Bonjour!" )
yield
print("Au revoir!" )
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def __A ( self : Union[str, Any] ) -> Any:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("transformers" ) is not None
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def __A ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Union[str, Any]:
with ContextManagers([] ):
print("Transformers are awesome!" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , "Transformers are awesome!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def __A ( self : Dict , __magic_name__ : Union[str, Any] ) -> int:
with ContextManagers([context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Welcome!\nTransformers are awesome!\nBye!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def __A ( self : Tuple , __magic_name__ : str ) -> Union[str, Any]:
with ContextManagers([context_fr(), context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n" )
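    # ContextManagers (as exercised above) behaves like contextlib.ExitStack: the given
    # context managers are entered left to right and exited in reverse, which is why the
    # French wrapper opens first and closes last here.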
@require_torch
def __A ( self : List[str] ) -> Union[str, Any]:
self.assertEqual(find_labels(__magic_name__ ) , ["labels"] )
self.assertEqual(find_labels(__magic_name__ ) , ["labels", "next_sentence_label"] )
self.assertEqual(find_labels(__magic_name__ ) , ["start_positions", "end_positions"] )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(__magic_name__ ) , ["labels"] )
@require_tf
def __A ( self : List[str] ) -> Optional[Any]:
self.assertEqual(find_labels(__magic_name__ ) , ["labels"] )
self.assertEqual(find_labels(__magic_name__ ) , ["labels", "next_sentence_label"] )
self.assertEqual(find_labels(__magic_name__ ) , ["start_positions", "end_positions"] )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(__magic_name__ ) , ["labels"] )
@require_flax
def __A ( self : int ) -> Tuple:
# Flax models don't have labels
self.assertEqual(find_labels(__magic_name__ ) , [] )
self.assertEqual(find_labels(__magic_name__ ) , [] )
self.assertEqual(find_labels(__magic_name__ ) , [] )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(__magic_name__ ) , [] )
| 118 | 0 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
_UpperCamelCase : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(__a , "num_attention_heads" ) )
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[int] , __a : Optional[int] , __a : str=13 , __a : int=64 , __a : str=3 , __a : Tuple=3 , __a : Optional[Any]=2 , __a : Tuple=1 , __a : Union[str, Any]=16 , __a : Any=[128, 256, 384] , __a : List[Any]=[4, 6, 8] , __a : Union[str, Any]=[2, 3, 4] , __a : Union[str, Any]=[16, 16, 16] , __a : Optional[Any]=0 , __a : List[str]=[2, 2, 2] , __a : int=[2, 2, 2] , __a : List[str]=0.02 , __a : Tuple=True , __a : int=True , __a : Dict=2 , ) -> List[Any]:
_UpperCamelCase : Tuple = parent
_UpperCamelCase : Optional[Any] = batch_size
_UpperCamelCase : Tuple = image_size
_UpperCamelCase : Tuple = num_channels
_UpperCamelCase : Any = kernel_size
_UpperCamelCase : Any = stride
_UpperCamelCase : Dict = padding
_UpperCamelCase : List[str] = hidden_sizes
_UpperCamelCase : Optional[Any] = num_attention_heads
_UpperCamelCase : List[Any] = depths
_UpperCamelCase : List[Any] = key_dim
_UpperCamelCase : Dict = drop_path_rate
_UpperCamelCase : Tuple = patch_size
_UpperCamelCase : List[Any] = attention_ratio
_UpperCamelCase : List[Any] = mlp_ratio
_UpperCamelCase : List[Any] = initializer_range
_UpperCamelCase : List[str] = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
_UpperCamelCase : Optional[Any] = is_training
_UpperCamelCase : Union[str, Any] = use_labels
_UpperCamelCase : Any = num_labels
_UpperCamelCase : Any = initializer_range
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
_UpperCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : str = None
if self.use_labels:
_UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
_UpperCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Union[str, Any] , __a : Optional[Any] , __a : int ) -> Optional[Any]:
_UpperCamelCase : int = LevitModel(config=__a )
model.to(__a )
model.eval()
_UpperCamelCase : Dict = model(__a )
_UpperCamelCase : Optional[Any] = (self.image_size, self.image_size)
        _UpperCamelCase, _UpperCamelCase = image_size[0], image_size[1]
for _ in range(4 ):
_UpperCamelCase : str = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
_UpperCamelCase : Tuple = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Optional[Any] , __a : Any , __a : str ) -> Tuple:
_UpperCamelCase : List[str] = self.num_labels
_UpperCamelCase : Optional[int] = LevitForImageClassification(__a )
model.to(__a )
model.eval()
_UpperCamelCase : Union[str, Any] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
_UpperCamelCase : Tuple = self.prepare_config_and_inputs()
        _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = config_and_inputs
_UpperCamelCase : List[str] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :str = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ :List[Any] = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Optional[int] = False
SCREAMING_SNAKE_CASE__ :str = False
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :Optional[int] = False
SCREAMING_SNAKE_CASE__ :Tuple = False
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
_UpperCamelCase : Dict = LevitModelTester(self )
_UpperCamelCase : int = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> str:
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
pass
@unittest.skip(reason="Levit does not output attentions" )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
pass
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
        _UpperCamelCase, _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Optional[int] = model_class(__a )
_UpperCamelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : int = [*signature.parameters.keys()]
_UpperCamelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
def check_hidden_states_output(__a : List[str] , __a : List[Any] , __a : List[Any] ):
_UpperCamelCase : Tuple = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_UpperCamelCase : Dict = model(**self._prepare_for_class(__a , __a ) )
_UpperCamelCase : List[Any] = outputs.hidden_states
_UpperCamelCase : int = len(self.model_tester.depths ) + 1
self.assertEqual(len(__a ) , __a )
_UpperCamelCase : Tuple = (self.model_tester.image_size, self.model_tester.image_size)
            _UpperCamelCase, _UpperCamelCase = image_size[0], image_size[1]
for _ in range(4 ):
_UpperCamelCase : str = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
_UpperCamelCase : Optional[int] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
        _UpperCamelCase, _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Optional[int] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : Any = True
check_hidden_states_output(__a , __a , __a )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
pass
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Dict , __a : int , __a : Union[str, Any]=False ) -> Tuple:
_UpperCamelCase : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
if not self.model_tester.is_training:
return
        _UpperCamelCase, _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Dict = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__a )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
_UpperCamelCase : str = model_class(__a )
model.to(__a )
model.train()
_UpperCamelCase : Tuple = self._prepare_for_class(__a , __a , return_labels=__a )
_UpperCamelCase : Optional[Any] = model(**__a ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
        _UpperCamelCase, _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_UpperCamelCase : Tuple = False
_UpperCamelCase : Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(__a ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
_UpperCamelCase : Tuple = model_class(__a )
model.gradient_checkpointing_enable()
model.to(__a )
model.train()
_UpperCamelCase : List[str] = self._prepare_for_class(__a , __a , return_labels=__a )
_UpperCamelCase : Optional[Any] = model(**__a ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
        _UpperCamelCase, _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Dict = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__a ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
_UpperCamelCase : int = problem_type["title"]
_UpperCamelCase : str = problem_type["num_labels"]
_UpperCamelCase : Union[str, Any] = model_class(__a )
model.to(__a )
model.train()
_UpperCamelCase : List[Any] = self._prepare_for_class(__a , __a , return_labels=__a )
if problem_type["num_labels"] > 1:
_UpperCamelCase : str = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
_UpperCamelCase : List[str] = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__a ) as warning_list:
_UpperCamelCase : str = model(**__a ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : List[str] = LevitModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowercase__ ( ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
_UpperCamelCase : Optional[int] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__a )
_UpperCamelCase : List[Any] = self.default_image_processor
_UpperCamelCase : Union[str, Any] = prepare_img()
_UpperCamelCase : Tuple = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_UpperCamelCase : List[str] = model(**__a )
# verify the logits
_UpperCamelCase : Any = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
_UpperCamelCase : Union[str, Any] = torch.tensor([1.04_48, -0.37_45, -1.83_17] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
| 353 |
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCamelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowerCamelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowerCamelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : List[List[List[str]]] , __a : List[List[str]] , __a : int = 1 , __a : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__a , hypotheses=__a , min_len=__a , max_len=__a )
}
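

if __name__ == "__main__":
    # Quick illustration of the GLEU idea from the docstring above (token lists
    # assumed, not part of the original module): GLEU is the minimum of n-gram
    # recall and n-gram precision, aggregated over the corpus.
    hyp = ["the", "cat", "sat"]
    refs = [["the", "cat", "sat", "down"]]
    print(gleu_score.corpus_gleu(list_of_references=[refs], hypotheses=[hyp]))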
| 310 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowercase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase : str = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width( height , width , scale_factor=8 ):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
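# Example of the rounding above (values assumed): downscale_height_and_width(768, 768)
# gives (96, 96), since 768 // 8**2 = 12 latent cells per side, scaled back by the
# factor of 8; non-multiples round up to the next full latent cell.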
def prepare_image( pil_image , w=512 , h=512 ):
    '''simple docstring'''
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert('''RGB''' ) )
    arr = arr.astype(np.float32 ) / 127.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
super().__init__()
self.register_modules(
unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , movq=SCREAMING_SNAKE_CASE , )
A : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def get_timesteps( self , num_inference_steps , strength , device ):
        """simple docstring"""
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
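    # Example of the strength/timestep interaction above (values assumed): with
    # num_inference_steps=100 and strength=0.2, init_timestep is 20 and t_start is 80,
    # so only the final 20 scheduler timesteps run and the result stays close to the
    # original image.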
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> List[Any]:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(SCREAMING_SNAKE_CASE )}' )
A : str = image.to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )
A : Union[str, Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
A : Optional[int] = image
else:
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : Union[str, Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(SCREAMING_SNAKE_CASE )
]
A : Optional[int] = torch.cat(SCREAMING_SNAKE_CASE , dim=0 )
else:
A : Optional[Any] = self.movq.encode(SCREAMING_SNAKE_CASE ).latent_dist.sample(SCREAMING_SNAKE_CASE )
A : str = self.movq.config.scaling_factor * init_latents
A : Any = torch.cat([init_latents] , dim=0 )
A : Tuple = init_latents.shape
A : str = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )
# get latents
A : str = self.scheduler.add_noise(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Dict = init_latents
return latents
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 ) -> List[Any]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
A : List[Any] = torch.device(F'cuda:{gpu_id}' )
A : Optional[int] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 ) -> Tuple:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
A : Dict = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
A : Union[str, Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
            A, A = cpu_offload_with_hook(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prev_module_hook=SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
A : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 512 , SCREAMING_SNAKE_CASE = 512 , SCREAMING_SNAKE_CASE = 100 , SCREAMING_SNAKE_CASE = 4.0 , SCREAMING_SNAKE_CASE = 0.3 , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , ) -> List[str]:
"""simple docstring"""
A : Tuple = self._execution_device
A : Any = guidance_scale > 1.0
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : str = torch.cat(SCREAMING_SNAKE_CASE , dim=0 )
A : str = image_embeds.shape[0]
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : Dict = torch.cat(SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
A : Union[str, Any] = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE , dim=0 )
A : List[str] = negative_image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE , dim=0 )
A : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=SCREAMING_SNAKE_CASE )
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : Dict = [image]
        if not all(isinstance(i , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                F'Input is in incorrect format: {[type(i ) for i in image]}. Currently, we only support PIL image and pytorch tensor' )
        A : Tuple = torch.cat([prepare_image(i , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for i in image] , dim=0 )
A : Tuple = image.to(dtype=image_embeds.dtype , device=SCREAMING_SNAKE_CASE )
A : Tuple = self.movq.encode(SCREAMING_SNAKE_CASE )['''latents''']
A : int = latents.repeat_interleave(SCREAMING_SNAKE_CASE , dim=0 )
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE , device=SCREAMING_SNAKE_CASE )
        A, A = self.get_timesteps(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Any = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        A, A = downscale_height_and_width(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.movq_scale_factor )
A : Optional[int] = self.prepare_latents(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , image_embeds.dtype , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
A : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A : Dict = {'''image_embeds''': image_embeds}
A : Tuple = self.unet(
sample=SCREAMING_SNAKE_CASE , timestep=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , added_cond_kwargs=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , )[0]
if do_classifier_free_guidance:
                A, A = noise_pred.split(latents.shape[1] , dim=1 )
                A, A = noise_pred.chunk(2 )
                A, A = variance_pred.chunk(2 )
A : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
A : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
                A, A = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
A : List[Any] = self.scheduler.step(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , )[0]
# post-processing
A : Tuple = self.movq.decode(SCREAMING_SNAKE_CASE , force_not_quantize=SCREAMING_SNAKE_CASE )['''sample''']
if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}' )
if output_type in ["np", "pil"]:
A : Union[str, Any] = image * 0.5 + 0.5
A : Dict = image.clamp(0 , 1 )
A : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A : int = self.numpy_to_pil(SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE )
| 3 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class A ( nn.Module ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 0.0
__magic_name__ = 1
__magic_name__ = 1
__magic_name__ = True
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = jnp.floataa
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Union[str, Any] = []
A : Union[str, Any] = []
for i in range(self.num_layers ):
A : Any = self.in_channels if i == 0 else self.out_channels
A : Optional[Any] = FlaxResnetBlockaD(
in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE )
A : Optional[int] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = resnets
A : Union[str, Any] = attentions
if self.add_downsample:
A : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[Any] = ()
for resnet, attn in zip(self.resnets , self.attentions ):
A : int = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
A : Dict = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
if self.add_downsample:
A : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
return hidden_states, output_states
class A ( nn.Module ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 0.0
__magic_name__ = 1
__magic_name__ = True
__magic_name__ = jnp.floataa
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Optional[Any] = []
for i in range(self.num_layers ):
A : Optional[Any] = self.in_channels if i == 0 else self.out_channels
A : List[str] = FlaxResnetBlockaD(
in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE )
A : Dict = resnets
if self.add_downsample:
A : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]:
"""simple docstring"""
A : str = ()
for resnet in self.resnets:
A : Optional[int] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
if self.add_downsample:
A : Optional[int] = self.downsamplers_a(SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
return hidden_states, output_states
class A ( nn.Module ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 0.0
__magic_name__ = 1
__magic_name__ = 1
__magic_name__ = True
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = jnp.floataa
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Optional[Any] = []
A : Optional[int] = []
for i in range(self.num_layers ):
A : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels
A : Dict = self.prev_output_channel if i == 0 else self.out_channels
A : List[str] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE )
A : int = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(SCREAMING_SNAKE_CASE )
A : Dict = resnets
A : Optional[Any] = attentions
if self.add_upsample:
A : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[int]:
"""simple docstring"""
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
A : List[str] = res_hidden_states_tuple[-1]
A : int = res_hidden_states_tuple[:-1]
A : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
A : Tuple = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
if self.add_upsample:
A : Dict = self.upsamplers_a(SCREAMING_SNAKE_CASE )
return hidden_states
class A ( nn.Module ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 0.0
__magic_name__ = 1
__magic_name__ = True
__magic_name__ = jnp.floataa
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : int = []
for i in range(self.num_layers ):
A : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
A : List[str] = self.prev_output_channel if i == 0 else self.out_channels
A : str = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE )
A : List[Any] = resnets
if self.add_upsample:
A : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Tuple:
"""simple docstring"""
for resnet in self.resnets:
# pop res hidden states
A : Optional[int] = res_hidden_states_tuple[-1]
A : Optional[Any] = res_hidden_states_tuple[:-1]
A : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
A : Optional[Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
if self.add_upsample:
A : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE )
return hidden_states
class A ( nn.Module ):
__magic_name__ = 42
__magic_name__ = 0.0
__magic_name__ = 1
__magic_name__ = 1
__magic_name__ = False
__magic_name__ = False
__magic_name__ = jnp.floataa
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : str = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
A : List[Any] = []
for _ in range(self.num_layers ):
A : int = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE )
A : List[str] = resnets
A : List[str] = attentions
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict:
"""simple docstring"""
A : Optional[Any] = self.resnets[0](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
A : Optional[int] = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
return hidden_states
| 3 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case ( snake_case__ :List[Any]) -> Any:
_A = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["""stage2""", """stage3""", """stage4"""] , )
_A = DetaConfig(
backbone_config=snake_case__ , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=snake_case__ , with_box_refine=snake_case__ , two_stage=snake_case__ , )
# set labels
_A = """huggingface/label-files"""
if "o365" in model_name:
_A = 366
_A = """object365-id2label.json"""
else:
_A = 91
_A = """coco-detection-id2label.json"""
_A = num_labels
_A = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type="""dataset""")) , """r"""))
    _A = {int(k): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
return config
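# A model name containing "o365" (an Objects365 checkpoint) gets the 366-label head
# configured above; any other name falls back to the 91 COCO detection labels.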
def snake_case ( snake_case__ :Union[str, Any]) -> Optional[Any]:
_A = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight"""))
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias"""))
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight"""))
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias"""))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias'''))
if i < 3:
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.reduction.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight'''))
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.bias''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias'''))
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight"""))
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias"""))
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight"""))
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias"""))
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight"""))
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias"""))
# transformer encoder
for i in range(config.encoder_layers):
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', F'''model.encoder.layers.{i}.self_attn.attention_weights.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', F'''model.encoder.layers.{i}.self_attn.attention_weights.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', F'''model.encoder.layers.{i}.self_attn.value_proj.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', F'''model.encoder.layers.{i}.self_attn.value_proj.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', F'''model.encoder.layers.{i}.self_attn.output_proj.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', F'''model.encoder.layers.{i}.self_attn.output_proj.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.weight''', F'''model.encoder.layers.{i}.self_attn_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''model.encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''model.encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''model.encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''model.encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''model.encoder.layers.{i}.fc2.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''model.encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''model.encoder.layers.{i}.final_layer_norm.bias'''))
# transformer decoder
for i in range(config.decoder_layers):
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.weight''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''model.decoder.layers.{i}.self_attn.out_proj.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''model.decoder.layers.{i}.self_attn.out_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.weight''', F'''model.decoder.layers.{i}.self_attn_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.bias''', F'''model.decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''model.decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''model.decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''model.decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''model.decoder.layers.{i}.fc2.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''model.decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''model.decoder.layers.{i}.final_layer_norm.bias'''))
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''')
            in_proj_bias = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim:, :]
            state_dict[f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim:]
            # fmt: on
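# A minimal sketch of the slicing convention used above, on toy tensors (the names
# here are illustrative only, not part of the conversion script): a fused qkv
# projection of shape (3 * dim, dim) splits into query/key/value as row blocks
# [0:dim], [dim:2*dim] and [-dim:].
#
#   import torch
#   dim = 4
#   qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
#   q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
#   assert torch.equal(torch.cat([q, k, v], dim=0), qkv)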
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[f'''model.decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size:]
# We will verify our conversion on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f'''Model name {model_name} not supported''')
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format="coco_detection")
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))
    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1E-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1E-4)
    print("Everything ok!")
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f'''jozhang97/{model_name}''')
        processor.push_to_hub(f'''jozhang97/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
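# Example invocation (a sketch; the script filename is hypothetical, the model names
# are the two supported choices from the argparse block above):
#
#   python convert_deta_swin_to_pytorch.py \
#       --model_name deta-swin-large \
#       --pytorch_dump_folder_path /tmp/deta-swin-large \
#       --push_to_hub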
| 353 | import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow('', '|', '|'),
    datarow=DataRow('', '|', '|'),
    padding=1,
    with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
    {
        'type': 'header',
        'text': {
            'type': 'plain_text',
            'text': f'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
            'emoji': True,
        },
    }
]
total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, 'r') as f:
        for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = f'''{line["duration"]:.4f}'''
                    if line.get('outcome', '') == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split('_')[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ''
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += F"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split('::')
                data[0] = data[0].split('/')[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=['Test Location', 'Num Failed'],
                tablefmt=hf_table_format,
                stralign='right',
            )
            message += F"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3_000:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 3_000 - offset] + F'''\n...\n```\n{err}'''
    print(F'''### {message}''')
else:
    message = 'No failed tests! 🤗'
    print(F'''## {message}''')
    payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
    from slack_sdk import WebClient
    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
    if message != "No failed tests! 🤗":
        md_report = {
            'type': 'section',
            'text': {
                'type': 'mrkdwn',
                'text': message,
            },
        }
        payload.append(md_report)
        action_button = {
            'type': 'section',
            'text': {
                'type': 'mrkdwn',
                'text': '*For more details:*',
            },
            'accessory': {
                'type': 'button',
                'text': {
                    'type': 'plain_text',
                    'text': 'Check Action results',
                    'emoji': True,
                },
                'url': F'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
            },
        }
        payload.append(action_button)
        date_report = {
            'type': 'context',
            'elements': [
                {
                    'type': 'plain_text',
                    'text': F'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
                }
            ],
        }
        payload.append(date_report)
        response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
        ts = response.data['ts']
        for failed_file in all_filesafailed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                test_class = ''
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = ''
                payload = {
                    'type': 'section',
                    'text': {
                        'type': 'mrkdwn',
                        'text': F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
                    },
                }
                client.chat_postMessage(
                    channel='#accelerate-ci-daily',
                    thread_ts=ts,
                    blocks=[payload],
                )
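# For reference, a top-level block posted by this script looks roughly like the
# following (a sketch assuming one failing log file; exact text depends on the logs):
#
#   {
#       "type": "section",
#       "text": {"type": "mrkdwn", "text": "*main: 1 failed test*\n```\n<failure table>\n```"},
#   }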
| 81 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None
class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None
    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break
    def __len__(self) -> int:
        return sum(1 for _ in self)
    def __repr__(self):
        return "->".join(str(item) for item in iter(self))
    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node
    def delete_front(self):
        return self.delete_nth(0)
    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data
    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3
    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
    import doctest
    doctest.testmod()
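# A quick usage sketch (assumes only the classes above; index-based operations are
# O(n) because the list is singly linked, even though it is circular):
#
#   cll = CircularLinkedList()
#   cll.insert_head(1)
#   cll.insert_tail(2)
#   print(cll)                 # 1->2
#   print(cll.delete_front())  # 1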
| 107 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
@staticmethod
@abstractmethod
def UpperCAmelCase_ ( UpperCAmelCase__ : ArgumentParser ) -> int:
raise NotImplementedError()
@abstractmethod
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
raise NotImplementedError()
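# A minimal sketch of a concrete subclass (the command name and wiring are
# illustrative, not an existing transformers command; `parser` here is assumed to be
# the subparsers object handed in by the CLI entry point):
#
#   class EchoCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           echo_parser = parser.add_parser("echo")
#           echo_parser.add_argument("text", type=str)
#           echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))
#
#       def __init__(self, text: str):
#           self.text = text
#
#       def run(self):
#           print(self.text)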
| 54 | 0 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = '''https://pypi.org/pypi/diffusers/json'''
    releases = json.loads(request.urlopen(url).read())['''releases'''].keys()
    return sorted(releases, key=lambda r: version.Version(r))
def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, '''r''', encoding='''utf-8''') as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r'''^\s*import\s+\.(\S+)\s*$''', content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'''^\s*from\s+\.(\S+)\s+import''', content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def check_imports(filename):
    with open(filename, '''r''', encoding='''utf-8''') as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r'''^\s*import\s+(\S+)\s*$''', content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'''^\s*from\s+(\S+)\s+import''', content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split('''.''')[0] for imp in imports if not imp.startswith('''.''')]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            '''This modeling file requires the following packages that were not found in your environment: '''
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`" )
    return get_relative_imports(filename)
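# For example, on a module whose source contains `import torch` and
# `from .utils import x`, check_imports verifies that `torch` is importable in the
# current environment and returns ["utils"], the relative imports that still need to
# be fetched. This is a sketch of the expected behavior, not a test.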
def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, '''.''')
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split('''.''')[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}." )
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file(pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = '''local'''
    elif pretrained_model_name_or_path.count('''/''') == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = '''v''' + '''.'''.join(__version__.split('''.''')[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else '''main'''
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'] )}." )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False, )
            submodule = '''git'''
            module_file = pretrained_model_name_or_path + '''.py'''
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, )
            submodule = os.path.join('''local''', '''--'''.join(pretrained_model_name_or_path.split('''/''')))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / f"{module_needed}.py").exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f"{module_needed}.py", cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, class_name: Optional[str] = None, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs, ):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return get_class_in_module(class_name, final_module.replace('''.py''', ''''''))
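# A usage sketch (the repo id and file name are illustrative, not a real repository):
# loading a community pipeline class from a hub repo that ships its own `pipeline.py`.
# With class_name=None, find_pipeline_class picks the single DiffusionPipeline subclass.
#
#   cls = get_class_from_dynamic_module(
#       "some-user/some-community-pipeline", "pipeline.py", class_name=None
#   )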
| 367 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 90 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
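# With the lazy module in place, `import transformers.models.trocr` stays cheap:
# `modeling_trocr` is only imported when one of its names (e.g. TrOCRForCausalLM) is
# first accessed. The `_import_structure` dict above is the package's full public surface.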
| 151 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", additional_special_tokens=[], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
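# A usage sketch (model id taken from the pretrained map above; round-tripping text
# through ids and back is a quick sanity check for a SentencePiece tokenizer):
#
#   tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tokenizer("Crime and Punishment")["input_ids"]
#   text = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(ids))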
| 151 | 1 |
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(_lowercase , "Please use tf.data to implement this functionality." )
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=_lowercase ) as bytestream:
a : Dict = _readaa(_lowercase )
if magic != 2051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
a : Tuple = _readaa(_lowercase )
a : Union[str, Any] = _readaa(_lowercase )
a : Any = _readaa(_lowercase )
a : int = bytestream.read(rows * cols * num_images )
a : str = numpy.frombuffer(_lowercase , dtype=numpy.uinta )
a : Dict = data.reshape(_lowercase , _lowercase , _lowercase , 1 )
return data
@deprecated(_lowercase , "Please use tf.one_hot on tensors." )
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] , _lowercase : Union[str, Any] ) ->Any:
'''simple docstring'''
a : Optional[Any] = labels_dense.shape[0]
a : Any = numpy.arange(_lowercase ) * num_classes
a : List[Any] = numpy.zeros((num_labels, num_classes) )
a : str = 1
return labels_one_hot
@deprecated(_lowercase , "Please use tf.data to implement this functionality." )
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] , _lowercase : Tuple=False , _lowercase : Tuple=10 ) ->Dict:
'''simple docstring'''
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=_lowercase ) as bytestream:
a : Any = _readaa(_lowercase )
if magic != 2049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
a : Union[str, Any] = _readaa(_lowercase )
a : Any = bytestream.read(_lowercase )
a : Optional[int] = numpy.frombuffer(_lowercase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_lowercase , _lowercase )
return labels
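# For example, _dense_to_one_hot(numpy.array([1, 0]), 3) yields
# [[0., 1., 0.], [1., 0., 0.]]: each label indexes a single hot column of its row.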
class _DataSet:
    @deprecated(
        None, "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.", )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None, ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 1_0000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"""images.shape: {images.shape} labels.shape: {labels.shape}"""
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images
    @property
    def labels(self):
        return self._labels
    @property
    def num_examples(self):
        return self._num_examples
    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(_lowercase , "Please write your own downloading logic." )
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : Dict , _lowercase : Optional[int] ) ->Any:
'''simple docstring'''
if not gfile.Exists(_lowercase ):
gfile.MakeDirs(_lowercase )
a : Union[str, Any] = os.path.join(_lowercase , _lowercase )
if not gfile.Exists(_lowercase ):
urllib.request.urlretrieve(_lowercase , _lowercase ) # noqa: S310
with gfile.GFile(_lowercase ) as f:
a : Dict = f.size()
print("Successfully downloaded" , _lowercase , _lowercase , "bytes." )
return filepath
@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=5000, seed=None, source_url=DEFAULT_SOURCE_URL, ):
    if fake_data:
        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"""{len(train_images)}. Received: {validation_size}."""
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
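# A usage sketch (the directory is illustrative; downloads the four MNIST archives
# on first run, then serves reshaped, normalized batches):
#
#   data = read_data_sets("/tmp/mnist", one_hot=True)
#   images, labels = data.train.next_batch(32)
#   print(images.shape, labels.shape)  # (32, 784) (32, 10)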
| 79 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class __UpperCamelCase :
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
pass
def __a ( self ) -> List[Any]:
pass
def __a ( self ) -> str:
pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"""Difference between torch and flax is {diff} (>= {tol}).""")
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Dict:
a : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
a : int = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Optional[Any]:
a, a : Optional[int] = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
a : Dict = {"vision_model": vision_model, "text_model": text_model}
a : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
a : List[str] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Union[str, Any]:
a, a : Dict = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
a : Tuple = {"vision_model": vision_model, "text_model": text_model}
a : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
a : List[str] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
a : Any = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
a : str = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
a : Dict = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
a : List[Any] = after_output[0]
a : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1E-3 )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> List[Any]:
a, a : Union[str, Any] = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[Any] = {"vision_model": vision_model, "text_model": text_model}
a : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ )
a : Tuple = model(
input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
a : int = output.vision_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
a : str = output.text_model_output.attentions
self.assertEqual(len(lowerCAmelCase__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
pt_model.to(lowerCAmelCase__ )
pt_model.eval()
# prepare inputs
a : List[Any] = inputs_dict
a : Any = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
a : int = pt_model(**lowerCAmelCase__ ).to_tuple()
a : Union[str, Any] = fx_model(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase__ )
a : Dict = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
a : Optional[int] = fx_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase__ )
a : Optional[int] = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_flax=lowerCAmelCase__ )
pt_model_loaded.to(lowerCAmelCase__ )
pt_model_loaded.eval()
with torch.no_grad():
a : int = pt_model_loaded(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowerCAmelCase__ , pt_output_loaded.numpy() , 4E-2 )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
a : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
a : Dict = VisionTextDualEncoderModel(lowerCAmelCase__ )
a : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
a : Dict = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase__ )
a : List[str] = fx_state
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
a : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
a : Optional[int] = VisionTextDualEncoderModel(lowerCAmelCase__ )
a : List[Any] = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )
a : int = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , fx_model.params )
self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : Any = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : int = self.prepare_config_and_inputs()
self.check_save_load(**lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : Tuple = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCAmelCase__ )
@is_pt_flax_cross_test
def __a ( self ) -> Any:
a : List[Any] = self.prepare_config_and_inputs()
a : Tuple = config_inputs_dict.pop("vision_config" )
a : int = config_inputs_dict.pop("text_config" )
a : List[str] = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
self.check_equivalence_flax_to_pt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __a ( self ) -> List[Any]:
a, a : Optional[int] = self.get_pretrained_model_and_inputs()
a : Optional[int] = model_a(**lowerCAmelCase__ )
a : Optional[int] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCAmelCase__ )
a : Any = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )
a : str = model_a(**lowerCAmelCase__ )
a : Dict = after_outputs[0]
a : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase__ , 1E-5 )
@require_flax
class __UpperCamelCase ( a__ , unittest.TestCase ):
def __a ( self ) -> List[Any]:
a : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
a : Any = 13
a : str = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
a : str = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
a : Optional[Any] = random_attention_mask([batch_size, 4] )
a : Optional[Any] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
a : Dict = FlaxViTModel(lowerCAmelCase__ )
a : Dict = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def __a ( self ) -> str:
a : Union[str, Any] = FlaxViTModelTester(self )
a : Dict = FlaxBertModelTester(self )
a : str = vit_model_tester.prepare_config_and_inputs()
a : Any = bert_model_tester.prepare_config_and_inputs()
a, a : Optional[int] = vision_config_and_inputs
a, a, a, a : Dict = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __UpperCamelCase ( a__ , unittest.TestCase ):
def __a ( self ) -> List[Any]:
a : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
a : Tuple = 13
a : Union[str, Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
a : Union[str, Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
a : Tuple = random_attention_mask([batch_size, 4] )
a : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
a : List[Any] = FlaxCLIPVisionModel(lowerCAmelCase__ )
a : Tuple = FlaxBertModel(lowerCAmelCase__ )
return vision_model, text_model
def __a ( self ) -> List[Any]:
a : Tuple = FlaxCLIPVisionModelTester(self )
a : Union[str, Any] = FlaxBertModelTester(self )
a : Dict = clip_model_tester.prepare_config_and_inputs()
a : Optional[int] = bert_model_tester.prepare_config_and_inputs()
a, a : Dict = vision_config_and_inputs
a, a, a, a : Union[str, Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __a ( self ) -> Dict:
a : str = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
a : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
a : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
a : Optional[int] = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" )
a : Optional[Any] = model(**lowerCAmelCase__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
a : List[str] = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1E-3 ) )
| 79 | 1 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks if a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """Returns the first n odd composites that are not a prime plus twice a square."""
    if not isinstance(n, int):
        raise ValueError('n must be an integer')
    if n <= 0:
        raise ValueError('n must be >= 0')
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []
def solution() -> int:
    """Return the solution to the problem."""
    return compute_nums(1)[0]
if __name__ == "__main__":
    print(F'{solution() = }')
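# Project Euler 46 ("Goldbach's other conjecture"): the answer is the smallest odd
# composite that cannot be written as a prime plus twice a square. The for/else above
# does the work: the else branch fires only when the while loop exhausts every square
# without finding a prime remainder, i.e. when the conjecture fails for that number.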
| 32 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
A__ = TypeVar("""T""")
A__ = TypeVar("""U""")
class __lowerCAmelCase ( Generic[T, U] ):
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = key
_lowerCAmelCase = val
_lowerCAmelCase = None
_lowerCAmelCase = None
def __repr__( self ):
"""simple docstring"""
return (
F'Node: key: {self.key}, val: {self.val}, '
F'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class __lowerCAmelCase ( Generic[T, U] ):
def __init__( self ):
"""simple docstring"""
_lowerCAmelCase = DoubleLinkedListNode(_snake_case , _snake_case )
_lowerCAmelCase = DoubleLinkedListNode(_snake_case , _snake_case )
_lowerCAmelCase , _lowerCAmelCase = self.rear, self.head
def __repr__( self ):
"""simple docstring"""
_lowerCAmelCase = ["""DoubleLinkedList"""]
_lowerCAmelCase = self.head
while node.next is not None:
rep.append(str(_snake_case ) )
_lowerCAmelCase = node.next
rep.append(str(self.rear ) )
return ",\n ".join(_snake_case )
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_lowerCAmelCase = node
_lowerCAmelCase = previous
_lowerCAmelCase = node
_lowerCAmelCase = self.rear
def snake_case ( self , _snake_case ):
"""simple docstring"""
if node.prev is None or node.next is None:
return None
_lowerCAmelCase = node.next
_lowerCAmelCase = node.prev
_lowerCAmelCase = None
_lowerCAmelCase = None
return node
class LRUCache(Generic[T, U]):
    """LRU Cache to store a given capacity of data."""

    # class variable mapping decorated functions to their cache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Returns the value for the input key and bumps the node to most-recent."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Sets the value for the input key, evicting the oldest entry if full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of LRU Cache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
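A short usage sketch of the cache above (assumes the reference names `LRUCache` and its `decorator` classmethod): memoizing a naive recursive Fibonacci.

@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)


print(fib(100))          # 354224848179261915075, each subproblem computed once
print(fib.cache_info())  # e.g. CacheInfo(hits=..., misses=..., capacity=100, current size=...)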
| 82 | 0 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and return the result reduced to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """Sum all unique reduced fraction sums and return denominator + numerator."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
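A worked check of the `add_three` reducer above: 1/2 + 1/3 + 1/6 gives raw numerator 36 over denominator 36, which reduces to 1/1.

assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)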
| 367 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Coefficients are ordered by degree, from smallest to largest."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
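A brief usage sketch of the class above (recall that coefficients are ordered from the constant term upward):

p = Polynomial(2, [1, 0, 3])  # 3x^2 + 1
q = Polynomial(1, [0, 2])     # 2x

print(p + q)           # 3x^2 + 2x + 1
print(p.evaluate(2))   # 13
print(p.derivative())  # 6x
print(p.integral(5))   # 1.0x^3 + 1.0x + 5  (constant of integration 5)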
| 38 | 0 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
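A hypothetical invocation of this script (the script and model/tokenizer names are placeholders; the argument names come from `InitializationArguments`):

# python initialize_model.py --config_name gpt2-large \
#     --tokenizer_name codeparrot/codeparrot \
#     --model_name codeparrot-model --push_to_hub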
| 340 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # the projection head is only used during self-supervised pre-training in MSN
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
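An example invocation of the converter (a sketch; the default `--checkpoint_url` converts the ViT-S/16 MSN checkpoint, and the output folder is a placeholder):

# python convert_vit_msn_to_pytorch.py \
#     --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#     --pytorch_dump_folder_path ./vit-msn-small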
| 340 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np",
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
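To exercise these tests locally (a sketch; the path mirrors the diffusers test layout, and the slow tests additionally require a CUDA GPU plus `RUN_SLOW=1`):

# python -m pytest tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py -k "FastTests"
# RUN_SLOW=1 python -m pytest tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py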
| 297 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=16 , UpperCAmelCase=[32, 64, 128] , UpperCAmelCase=[1, 2, 1] , UpperCAmelCase=[2, 2, 4] , UpperCAmelCase=2 , UpperCAmelCase=2.0 , UpperCAmelCase=True , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=10 , UpperCAmelCase=8 , UpperCAmelCase=["stage1", "stage2"] , UpperCAmelCase=[1, 2] , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = embed_dim
lowercase_ = hidden_sizes
lowercase_ = depths
lowercase_ = num_heads
lowercase_ = window_size
lowercase_ = mlp_ratio
lowercase_ = qkv_bias
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = drop_path_rate
lowercase_ = hidden_act
lowercase_ = use_absolute_embeddings
lowercase_ = patch_norm
lowercase_ = layer_norm_eps
lowercase_ = initializer_range
lowercase_ = is_training
lowercase_ = scope
lowercase_ = use_labels
lowercase_ = type_sequence_label_size
lowercase_ = encoder_stride
lowercase_ = out_features
lowercase_ = out_indices
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = FocalNetModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
lowercase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowercase_ = None
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = FocalNetForMaskedImageModeling(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForMaskedImageModeling(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.type_sequence_label_size
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def A__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowercase_ = outputs.hidden_states
lowercase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# FocalNet has a different seq_length
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowercase_ = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = reshaped_hidden_states[0].shape
lowercase_ = (
reshaped_hidden_states[0].view(UpperCAmelCase , UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
@slow
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = FocalNetModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = _config_zero_init(UpperCAmelCase )
for model_class in self.all_model_classes:
lowercase_ = model_class(config=UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
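These tests are normally driven by pytest (a sketch; the path mirrors the transformers test layout):

# python -m pytest tests/models/focalnet/test_modeling_focalnet.py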
| 297 | 1 |
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A class replicating BertConfig with additional parameters for pruning/masking."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
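A minimal instantiation sketch (assumes the restored class name above):

config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
print(config.model_type)         # masked_bert
print(config.num_hidden_layers)  # 12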
| 48 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
a : Union[str, Any] = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
a : Optional[int] = 8
a : Dict = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
a : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : Dict = torch.tensor(_lowercase )
elif key_name.startswith("model/moe" ):
a : List[str] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
a : str = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
a : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : List[str] = torch.tensor(_lowercase )
elif key_name.endswith("/softmlp/kernel" ):
a : Optional[int] = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
a : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : List[Any] = torch.tensor(_lowercase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
a : Any = key_name[-9:-7]
for i in range(16 ):
a : List[Any] = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
a : str = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
a : Dict = torch.tensor(_lowercase )
elif key_name.startswith("model/mlp" ):
a : Union[str, Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
a : str = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
a : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : Optional[int] = torch.tensor(_lowercase )
elif key_name.endswith("/p1/bias" ):
a : str = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
a : List[Any] = vnp.copy() # same because it is one dimensional
a : Tuple = torch.tensor(_lowercase )
elif key_name.endswith("/p2/kernel" ):
a : Union[str, Any] = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
a : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : List[Any] = torch.tensor(_lowercase )
elif key_name.endswith("/p2/bias" ):
a : Dict = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
a : List[str] = vnp.copy() # same because it is one dimensional
a : str = torch.tensor(_lowercase )
elif key_name.startswith("model/ln" ):
a : List[str] = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
a : Optional[Any] = "model.blocks.%d.feed_forward.norm.bias" % player
a : Tuple = vnp.copy() # same because it is one dimensional
a : int = torch.tensor(_lowercase )
elif key_name.endswith("/g" ):
a : Optional[Any] = "model.blocks.%d.feed_forward.norm.weight" % player
a : List[str] = vnp.copy() # same because it is one dimensional
a : Tuple = torch.tensor(_lowercase )
elif key_name.startswith("model/att" ):
a : Optional[Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
a : Union[str, Any] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
a : List[str] = state[:, 0, :, :]
a : Dict = state[:, 1, :, :]
a : Union[str, Any] = state[:, 2, :, :]
a : str = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
a : List[str] = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
a : Dict = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
a : List[Any] = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
a : Union[str, Any] = torch.tensor(_lowercase )
a : Any = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
a : List[str] = torch.tensor(_lowercase )
a : Optional[Any] = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
a : Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("/o/kernel" ):
a : Any = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
a : Optional[int] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
a : Tuple = torch.tensor(_lowercase )
elif key_name.startswith("model/an" ):
a : List[str] = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
a : Optional[int] = "model.blocks.%d.self_attn.norm.bias" % player
a : Union[str, Any] = vnp.copy() # same because it is one dimensional
a : List[Any] = torch.tensor(_lowercase )
elif key_name.endswith("/g" ):
a : Any = "model.blocks.%d.self_attn.norm.weight" % player
a : str = vnp.copy() # same because it is one dimensional
a : Any = torch.tensor(_lowercase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
a : Optional[int] = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
a : Tuple = "model.%s.weight" % nlayer
a : Any = vnp.copy() # same in embedded
a : Tuple = torch.tensor(_lowercase )
if key_name.startswith("model/wte" ):
a : Optional[int] = "lm_head.weight"
a : Optional[int] = vnp.copy() # same in embedded
a : Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith("model/wob" ):
a : Optional[int] = "final_logits_bias"
a : Optional[Any] = vnp.copy() # same in embedded
a : Optional[int] = state.reshape((1, -1) )
a : List[Any] = torch.tensor(_lowercase )
elif key_name == "model/dense/kernel":
a : Optional[int] = "model.last_project.weight"
a : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : List[Any] = torch.tensor(_lowercase )
elif key_name == "model/dense_1/bias":
a : Dict = "model.last_project.bias"
a : Optional[Any] = vnp.copy() # same because it is one dimensional
a : Any = torch.tensor(_lowercase )
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
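An example invocation (a sketch; both paths are placeholders):

# python convert_tf_gptsan_to_pt.py --tf_model_dir ./gptsan_tf_checkpoint --output ./gptsan_pytorch_model.pt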
| 105 | 0 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
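A composition sketch using the restored config class (the encoder/decoder configs here are stand-ins; `from_encoder_decoder_configs` is the public transformers API):

from transformers import GPT2Config, ViTConfig

encoder_config = ViTConfig()
decoder_config = GPT2Config()
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
print(config.model_type)  # vision-encoder-decoder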
| 353 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func):
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run 5 additional times to stabilize compilation for tpu
                    logger.info('Do inference on TPU. Running model 5 times to stabilize compilation')
                    timeit.repeat(func, repeat=1, number=5)
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func, repeat=self.args.repeat, number=10)
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            'Note that TensorFlow allocates more memory than '
            'it might need to speed up computation. '
            'The memory reported here corresponds to the memory '
            'reported by `nvidia-smi`, which can vary depending '
            'on total available memory on the GPU that is used.')
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            '`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
                            ' consumption line by line.')
                    trace = start_memory_tracing('transformers')
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
                        ' with `args.memory=False`')
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            'py3nvml not installed, we won\'t log GPU memory usage. '
                            'Install py3nvml (pip install py3nvml) to log information about GPU.')
                        memory = 'N/A'
                    else:
                        logger.info(
                            'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
                            ' running on the same GPU.')
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
                            ' TensorFlow.')
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return 'N/A', None
| 271 | 0 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Solve for whichever of voltage, current and power is passed as 0."""
    result = namedtuple('result', 'name value')
if (voltage, current, power).count(0 ) != 1:
raise ValueError('Only one argument must be 0' )
elif power < 0:
raise ValueError(
'Power cannot be negative in any electrical/electronics system' )
elif voltage == 0:
return result('voltage', power / current )
elif current == 0:
return result('current', power / voltage )
elif power == 0:
return result('power', float(round(abs(voltage * current ), 2 ) ) )
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
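A quick usage sketch for the solver above (the numeric values are illustrative):

# exactly one of the three quantities must be 0; that is the one solved for
print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)
print(electric_power(voltage=2, current=2, power=0))  # result(name='power', value=4.0)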
| 138 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
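The `_LazyModule` indirection defers the heavy torch/TF/Flax imports until an attribute is actually accessed. A minimal sketch of the same idea using a module-level `__getattr__` (PEP 562); the structure below is illustrative, not the actual `_LazyModule` implementation:

import importlib

_import_structure = {'tokenization_blenderbot': ['BlenderbotTokenizer']}

def __getattr__(name):
    # resolve the owning submodule lazily on first attribute access
    for module_name, attributes in _import_structure.items():
        if name in attributes:
            module = importlib.import_module(f'.{module_name}', __name__)
            return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')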
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000) -> np.ndarray:
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."})
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."})
    train_split_name: str = field(
        default="train",
        metadata={"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"},
    )
    eval_split_name: str = field(
        default="validation",
        metadata={"help": "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"},
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"})
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={"help": "For debugging purposes or quicker training, truncate the number of training examples to this value if set."},
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set."},
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"})
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."})
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."})
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."})
    use_auth_token: bool = field(
        default=False,
        metadata={"help": "Will use the token generated when running `huggingface-cli login` (necessary to use this script with private models)."},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."})
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`.'''
'''Only make use of `--freeze_feature_encoder`.''' )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None)
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None)
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. """
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. """
'''Make sure to set `--label_column_name` to the correct text column - one of '''
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path, return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate))
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate)
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
__A : Union[str, Any] = raw_datasets["train"].features[data_args.label_column_name].names
__A : Dict = {}, {}
for i, label in enumerate(_a ):
__A : Dict = str(_a )
__A : Tuple = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="audio-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes)
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=raw_datasets["train"] if training_args.do_train else None, eval_dataset=raw_datasets["eval"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor)
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics('''train''' ,train_result.metrics )
trainer.save_metrics('''train''' ,train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
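An example invocation of this training script (the script filename, dataset and hyperparameters below are illustrative assumptions, not prescribed by the code):

# python run_audio_classification.py \
#     --model_name_or_path facebook/wav2vec2-base \
#     --dataset_name superb --dataset_config_name ks \
#     --output_dir wav2vec2-base-ft-keyword-spotting \
#     --do_train --do_eval \
#     --max_length_seconds 1 --learning_rate 3e-5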
| 364 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 291 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None

CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")

def distribute_coins(root: Node | None) -> int:
    """Return the minimum number of moves so that every node holds exactly one coin."""
    if root is None:
        return 0
    # Validation
    def count_nodes(node) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
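A small usage sketch: a root holding 3 coins with two empty children needs two moves, one coin pushed to each child:

tree = Node(3, Node(0), Node(0))
print(distribute_coins(tree))  # 2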
| 91 |
def merge_sort(collection: list) -> list:
    """Pure implementation of merge sort in Python."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 310 | 0 |
from sklearn.metrics import mean_squared_error
import datasets
lowercase ='\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
lowercase ='\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
lowercase ='\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types()) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
    def _get_feature_types(self):
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float')),
"references": datasets.Sequence(datasets.Value('float')),
}
else:
return {
"predictions": datasets.Value('float'),
"references": datasets.Value('float'),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        """Compute mean squared error (or RMSE when squared=False)."""
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared)
        return {"mse": mse}
| 359 |
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo

def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo

def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)

def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)

def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None

def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None

def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)

if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    result = binary_search(collection, target)
if result is None:
print(F"""{target} was not found in {collection}.""")
else:
print(F"""{target} was found at position {result} in {collection}.""")
| 242 | 0 |
def get_min_or_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return min_val when option is True, otherwise max_val."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('Invalid value for min_val or max_val (min_value < max_value)')
    return min_val if option else max_val

def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)

def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('argument value for lower and higher must be(lower > higher)')
    if not lower < to_guess < higher:
        raise ValueError(
            'guess value must be within the range of lower and higher value')

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print('started...')
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")

def main() -> None:
    lower = int(input('Enter lower value : ').strip())
    higher = int(input('Enter high value : ').strip())
    guess = int(input('Enter value to guess : ').strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
| 90 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)

def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead.")
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.")

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True
        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration.")
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration.")
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}")
    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor class for a given config class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class) | 81 | 0 |
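Typical usage of the auto class defined above (the checkpoint name is illustrative):

from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained('google/vit-base-patch16-224')
# a custom processor can also be attached to a custom config class:
# AutoImageProcessor.register(CustomConfig, CustomImageProcessor)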
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
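An example invocation, assuming the script is saved under a name like the one below (the filename and paths are assumptions):

# python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./funnel/model.ckpt \
#     --config_file ./funnel/config.json \
#     --pytorch_dump_path ./funnel/pytorch_model.bin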
| 333 | import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowercase__ : Any = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase__ : int = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Dict = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global].")
# Encoder
for layer_index in range(config.num_layers):
        layer_name = f'''layers_{str(layer_index)}'''
# Self-Attention
lowercase__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
lowercase__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : Any = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[str] = tax_attention_value
lowercase__ : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Any = tax_global_layer_norm
if split_mlp_wi:
lowercase__ : Tuple = tax_mlp_wi_a
lowercase__ : str = tax_mlp_wi_a
else:
lowercase__ : List[Any] = tax_mlp_wi
lowercase__ : str = tax_mlp_wo
lowercase__ : int = tax_mlp_layer_norm
        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block
# Only for layer 0:
lowercase__ : Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : Optional[int] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_encoder_global_rel_embedding
# Assigning
lowercase__ : Optional[int] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
lowercase__ : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers):
        layer_name = f'''layers_{str(layer_index)}'''
# Self-Attention
lowercase__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
lowercase__ : int = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
lowercase__ : Any = tax_enc_dec_attention_module["key"]["kernel"]
lowercase__ : Union[str, Any] = tax_enc_dec_attention_module["out"]["kernel"]
lowercase__ : Any = tax_enc_dec_attention_module["query"]["kernel"]
lowercase__ : Tuple = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
lowercase__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : Optional[Any] = flax_model.params["decoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : List[Any] = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[Any] = tax_attention_value
lowercase__ : List[str] = tax_pre_attention_layer_norm
lowercase__ : List[Any] = tax_enc_dec_attention_key
lowercase__ : Optional[Any] = tax_enc_dec_attention_out
lowercase__ : str = tax_enc_dec_attention_query
lowercase__ : Union[str, Any] = tax_enc_dec_attention_value
lowercase__ : Tuple = tax_cross_layer_norm
if split_mlp_wi:
lowercase__ : List[str] = tax_mlp_wi_a
lowercase__ : List[Any] = tax_mlp_wi_a
else:
lowercase__ : Tuple = tax_mlp_wi
lowercase__ : Any = tax_mlp_wo
lowercase__ : Tuple = txa_mlp_layer_norm
        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block
# Decoder Normalization
lowercase__ : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
lowercase__ : List[Any] = txa_decoder_norm
# Only for layer 0:
lowercase__ : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_decoder_rel_embedding
# Token Embeddings
lowercase__ : Optional[Any] = tax_model["target"]["token_embedder"]["embedding"]
lowercase__ : Optional[Any] = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase__ : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(_lowerCamelCase)
print("T5X Model was sucessfully converted!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
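An example invocation, assuming the script is saved under a name like the one below (the filename and paths are assumptions):

# python convert_t5x_checkpoint_to_flax.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#     --config_name google/t5-v1_1-base \
#     --flax_dump_folder_path ./t5x-to-flax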
| 333 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler")
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np")
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 136 |
class Graph:
    """Weighted undirected graph stored as adjacency lists."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Boruvka's algorithm needs distinct edge weights; bump any duplicates."""
        edges = self.get_edges()
        # drop the reversed duplicate of every undirected edge
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """Returns a string representation of the graph."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from lists of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set structure with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Returns the minimum spanning tree of the graph using Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            # drop the reversed duplicate of every undirected edge
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
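# --- Usage sketch (added; not part of the original dataset row, with made-up
# vertex and edge values): build a small weighted graph and print its MST.
if __name__ == "__main__":
    demo = Graph.build(
        vertices=[1, 2, 3, 4],
        edges=[(1, 2, 1), (1, 3, 2), (2, 3, 3), (3, 4, 4)],
    )
    demo.distinct_weight()
    print(Graph.boruvka_mst(demo))  # edges printed as "head -> tail == weight"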
| 90 | 0 |
"""
Jacobi iteration method for solving a strictly diagonally dominant
system of linear equations.
"""
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raises ValueError unless every diagonal entry dominates the rest of its row."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
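# --- Usage sketch (added; not part of the original dataset row) ---
# A strictly diagonally dominant 3x3 system for jacobi_iteration_method above.
if __name__ == "__main__":
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [0.0, 0.0, 10.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=3))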
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod() | 228 |
"""Fill-mask demo for CamemBERT."""
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3)) | 228 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=DummyObject ):
"""simple docstring"""
snake_case = ['''flax''']
def __init__( self : List[Any] , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : int , *__UpperCAmelCase : Any , **__UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _UpperCAmelCase ( metaclass=DummyObject ):
"""simple docstring"""
snake_case = ['''flax''']
def __init__( self : Tuple , *__UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *__UpperCAmelCase : Dict , **__UpperCAmelCase : int ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : Any , *__UpperCAmelCase : Dict , **__UpperCAmelCase : str ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _UpperCAmelCase ( metaclass=DummyObject ):
"""simple docstring"""
snake_case = ['''flax''']
def __init__( self : Any , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : Dict ):
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : int ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : Dict , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : Any ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _UpperCAmelCase ( metaclass=DummyObject ):
"""simple docstring"""
snake_case = ['''flax''']
def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Dict ):
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : int , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] , *__UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _UpperCAmelCase ( metaclass=DummyObject ):
"""simple docstring"""
snake_case = ['''flax''']
def __init__( self : List[Any] , *__UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : Tuple ):
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : str , *__UpperCAmelCase : Dict , **__UpperCAmelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : str , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _UpperCAmelCase ( metaclass=DummyObject ):
"""simple docstring"""
snake_case = ['''flax''']
def __init__( self : Optional[int] , *__UpperCAmelCase : Any , **__UpperCAmelCase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : str , *__UpperCAmelCase : int , **__UpperCAmelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *__UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _UpperCAmelCase ( metaclass=DummyObject ):
"""simple docstring"""
snake_case = ['''flax''']
def __init__( self : str , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : int ):
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , *__UpperCAmelCase : int , **__UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : str , *__UpperCAmelCase : Dict , **__UpperCAmelCase : str ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _UpperCAmelCase ( metaclass=DummyObject ):
"""simple docstring"""
snake_case = ['''flax''']
def __init__( self : Optional[Any] , *__UpperCAmelCase : int , **__UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : Any , *__UpperCAmelCase : Dict , **__UpperCAmelCase : Any ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *__UpperCAmelCase : Dict , **__UpperCAmelCase : int ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _UpperCAmelCase ( metaclass=DummyObject ):
"""simple docstring"""
snake_case = ['''flax''']
def __init__( self : List[str] , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : Any , *__UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _UpperCAmelCase ( metaclass=DummyObject ):
"""simple docstring"""
snake_case = ['''flax''']
def __init__( self : Optional[Any] , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _UpperCAmelCase ( metaclass=DummyObject ):
"""simple docstring"""
snake_case = ['''flax''']
def __init__( self : Any , *__UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : int ):
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *__UpperCAmelCase : str , **__UpperCAmelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _UpperCAmelCase ( metaclass=DummyObject ):
"""simple docstring"""
snake_case = ['''flax''']
def __init__( self : Dict , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
class _UpperCAmelCase ( metaclass=DummyObject ):
"""simple docstring"""
snake_case = ['''flax''']
def __init__( self : Optional[int] , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : List[Any] , *__UpperCAmelCase : Dict , **__UpperCAmelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ["flax"] )
| 79 |
"""
Jaccard similarity between two collections: |A ∩ B| / |A ∪ B|.
"""


def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Finds the Jaccard similarity between two sets or two lists/tuples.
    When alternative_union is True the score is divided by len(A) + len(B)
    instead of the size of the union.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
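    # Added illustration (not in the original row): the two union conventions
    # give different scores for the same inputs.
    print(jaccard_similarity(set_a, set_b, alternative_union=True))  # 3 / 11 = 0.2727...
    # With the standard union the result above is 3 / 8 = 0.375.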
| 79 | 1 |
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCAmelCase: Any = logging.get_logger(__name__)
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ["input_values", "attention_mask"]
def __init__( self ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = 1_60_00 ,UpperCAmelCase_ = 0.0 ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 80 ,UpperCAmelCase_ = 16 ,UpperCAmelCase_ = 64 ,UpperCAmelCase_ = "hann_window" ,UpperCAmelCase_ = 1.0 ,UpperCAmelCase_ = 80 ,UpperCAmelCase_ = 76_00 ,UpperCAmelCase_ = 1E-10 ,UpperCAmelCase_ = 2 ,UpperCAmelCase_ = True ,**UpperCAmelCase_ ,):
super().__init__(feature_size=UpperCAmelCase_ ,sampling_rate=UpperCAmelCase_ ,padding_value=UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : List[str] = do_normalize
_lowercase : Any = return_attention_mask
_lowercase : str = num_mel_bins
_lowercase : Dict = hop_length
_lowercase : Dict = win_length
_lowercase : int = win_function
_lowercase : Any = frame_signal_scale
_lowercase : Any = fmin
_lowercase : Union[str, Any] = fmax
_lowercase : Union[str, Any] = mel_floor
_lowercase : Any = reduction_factor
_lowercase : List[str] = win_length * sampling_rate // 10_00
_lowercase : Dict = hop_length * sampling_rate // 10_00
_lowercase : Optional[Any] = optimal_fft_length(self.sample_size )
_lowercase : List[Any] = (self.n_fft // 2) + 1
_lowercase : Any = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=UpperCAmelCase_ )
_lowercase : Dict = mel_filter_bank(
num_frequency_bins=self.n_freqs ,num_mel_filters=self.num_mel_bins ,min_frequency=self.fmin ,max_frequency=self.fmax ,sampling_rate=self.sampling_rate ,norm="""slaney""" ,mel_scale="""slaney""" ,)
if frame_signal_scale != 1.0:
warnings.warn(
"""The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers""" ,UpperCAmelCase_ ,)
if reduction_factor != 2.0:
warnings.warn(
"""The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers""" ,UpperCAmelCase_ ,)
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowerCamelCase__ ( UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = 0.0 ):
if attention_mask is not None:
_lowercase : Any = np.array(UpperCAmelCase_ ,np.intaa )
_lowercase : List[Any] = []
for vector, length in zip(UpperCAmelCase_ ,attention_mask.sum(-1 ) ):
_lowercase : List[str] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_lowercase : List[str] = padding_value
normed_input_values.append(UpperCAmelCase_ )
else:
_lowercase : List[str] = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,):
_lowercase : List[str] = spectrogram(
UpperCAmelCase_ ,window=self.window ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,mel_filters=self.mel_filters ,mel_floor=self.mel_floor ,log_mel="""log10""" ,)
return log_mel_spec.T
def __call__( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
if audio is None and audio_target is None:
raise ValueError("""You must provide either `audio` or `audio_target` values.""" )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if audio is not None:
_lowercase : Tuple = self._process_audio(
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ,)
else:
_lowercase : Dict = None
if audio_target is not None:
_lowercase : Any = self._process_audio(
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ,)
if inputs is None:
return inputs_target
else:
_lowercase : List[Any] = inputs_target["""input_values"""]
_lowercase : Dict = inputs_target.get("""attention_mask""" )
if decoder_attention_mask is not None:
_lowercase : Union[str, Any] = decoder_attention_mask
return inputs
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = False ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
_lowercase : Any = isinstance(UpperCAmelCase_ ,np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
_lowercase : Tuple = is_batched_numpy or (
isinstance(UpperCAmelCase_ ,(list, tuple) ) and (isinstance(speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase : Union[str, Any] = [np.asarray(UpperCAmelCase_ ,dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(UpperCAmelCase_ ,np.ndarray ):
_lowercase : int = np.asarray(UpperCAmelCase_ ,dtype=np.floataa )
elif isinstance(UpperCAmelCase_ ,np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
_lowercase : Union[str, Any] = speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase : Dict = [speech]
# needed to make pad() work on spectrogram inputs
_lowercase : Dict = self.feature_size
# convert into correct format for padding
if is_target:
_lowercase : str = [self._extract_mel_features(UpperCAmelCase_ ) for waveform in speech]
_lowercase : Any = BatchFeature({"""input_values""": features} )
_lowercase : Any = self.num_mel_bins
else:
_lowercase : Optional[Any] = BatchFeature({"""input_values""": speech} )
_lowercase : List[Any] = self.pad(
UpperCAmelCase_ ,padding=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,pad_to_multiple_of=UpperCAmelCase_ ,return_attention_mask=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
_lowercase : Dict = feature_size_hack
# convert input values to correct format
_lowercase : Tuple = padded_inputs["""input_values"""]
if not isinstance(input_values[0] ,np.ndarray ):
_lowercase : Dict = [np.asarray(UpperCAmelCase_ ,dtype=np.floataa ) for array in input_values]
elif (
not isinstance(UpperCAmelCase_ ,np.ndarray )
and isinstance(input_values[0] ,np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
_lowercase : Dict = [array.astype(np.floataa ) for array in input_values]
elif isinstance(UpperCAmelCase_ ,np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
_lowercase : int = input_values.astype(np.floataa )
# convert attention_mask to correct format
_lowercase : Dict = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
_lowercase : Union[str, Any] = [np.asarray(UpperCAmelCase_ ,dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
_lowercase : Union[str, Any] = (
attention_mask
if self._get_padding_strategies(UpperCAmelCase_ ,max_length=UpperCAmelCase_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_lowercase : Union[str, Any] = self.zero_mean_unit_var_norm(
padded_inputs["""input_values"""] ,attention_mask=UpperCAmelCase_ ,padding_value=self.padding_value )
if return_tensors is not None:
_lowercase : Optional[int] = padded_inputs.convert_to_tensors(UpperCAmelCase_ )
return padded_inputs
def lowerCamelCase__ ( self ):
_lowercase : Any = super().to_dict()
# Don't serialize these as they are derived from the other properties.
_lowercase : Union[str, Any] = ["""window""", """mel_filters""", """sample_size""", """sample_stride""", """n_fft""", """n_freqs"""]
for name in names:
if name in output:
del output[name]
return output
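# --- Usage sketch (added; assumes the class ships as
# transformers.SpeechT5FeatureExtractor, which is how the released library
# exposes it; shown as comments because this module uses relative imports):
#   feature_extractor = SpeechT5FeatureExtractor()
#   waveform = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
#   inputs = feature_extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
#   targets = feature_extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
#   inputs["input_values"] holds the raw waveform batch; targets["input_values"]
#   holds log-mel frames with num_mel_bins (80) features per frame.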
| 336 |
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 336 | 1 |
"""simple docstring"""
import math
a_ = 1_0
a_ = 7
a_ = BALLS_PER_COLOUR * NUM_COLOURS
def __UpperCAmelCase ( __UpperCamelCase = 20 ):
__lowercase : str = math.comb(__UpperCamelCase , __UpperCamelCase )
__lowercase : List[str] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , __UpperCamelCase )
__lowercase : Optional[int] = NUM_COLOURS * (1 - missing_colour / total)
return f"""{result:.9f}"""
if __name__ == "__main__":
print(solution(2_0))
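# --- Monte Carlo cross-check (added; not part of the original row): a quick
# empirical estimate of the same expectation, useful for sanity checking.
def monte_carlo_estimate(trials: int = 100_000) -> float:
    import random

    balls = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    seen = sum(len(set(random.sample(balls, 20))) for _ in range(trials))
    return seen / trials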
| 249 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _SCREAMING_SNAKE_CASE ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
def _A ( self : Any ):
return 32
@property
def _A ( self : Any ):
return 32
@property
def _A ( self : Optional[Any] ):
return self.time_input_dim * 4
@property
def _A ( self : Union[str, Any] ):
return 8
@property
def _A ( self : int ):
torch.manual_seed(0 )
UpperCamelCase :Union[str, Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
UpperCamelCase :Optional[int] = CLIPVisionModel(__lowerCamelCase )
return model
@property
def _A ( self : str ):
UpperCamelCase :Optional[int] = CLIPImageProcessor(
crop_size=224 , do_center_crop=__lowerCamelCase , do_normalize=__lowerCamelCase , do_resize=__lowerCamelCase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
@property
def _A ( self : Tuple ):
torch.manual_seed(0 )
UpperCamelCase :Dict = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
UpperCamelCase :int = PriorTransformer(**__lowerCamelCase )
return model
@property
def _A ( self : Optional[int] ):
torch.manual_seed(0 )
UpperCamelCase :str = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
UpperCamelCase :List[str] = ShapERenderer(**__lowerCamelCase )
return model
def _A ( self : str ):
UpperCamelCase :int = self.dummy_prior
UpperCamelCase :Any = self.dummy_image_encoder
UpperCamelCase :Dict = self.dummy_image_processor
UpperCamelCase :List[Any] = self.dummy_renderer
UpperCamelCase :int = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1_024 , prediction_type="""sample""" , use_karras_sigmas=__lowerCamelCase , clip_sample=__lowerCamelCase , clip_sample_range=1.0 , )
UpperCamelCase :Optional[Any] = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def _A ( self : int , __lowerCamelCase : int , __lowerCamelCase : Any=0 ):
UpperCamelCase :Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
if str(__lowerCamelCase ).startswith("""mps""" ):
UpperCamelCase :List[Any] = torch.manual_seed(__lowerCamelCase )
else:
UpperCamelCase :Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
UpperCamelCase :Optional[Any] = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def _A ( self : List[str] ):
UpperCamelCase :Dict = """cpu"""
UpperCamelCase :List[Any] = self.get_dummy_components()
UpperCamelCase :Optional[int] = self.pipeline_class(**__lowerCamelCase )
UpperCamelCase :int = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
UpperCamelCase :Dict = output.images[0]
UpperCamelCase :List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCamelCase :Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self : List[Any] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _A ( self : List[Any] ):
UpperCamelCase :str = torch_device == """cpu"""
UpperCamelCase :int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__lowerCamelCase , relax_max_difference=__lowerCamelCase , )
def _A ( self : List[Any] ):
UpperCamelCase :List[Any] = self.get_dummy_components()
UpperCamelCase :Optional[int] = self.pipeline_class(**__lowerCamelCase )
UpperCamelCase :List[Any] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Any = 1
UpperCamelCase :int = 2
UpperCamelCase :Union[str, Any] = self.get_dummy_inputs(__lowerCamelCase )
for key in inputs.keys():
if key in self.batch_params:
UpperCamelCase :str = batch_size * [inputs[key]]
UpperCamelCase :Optional[int] = pipe(**__lowerCamelCase , num_images_per_prompt=__lowerCamelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self : Any ):
UpperCamelCase :Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
UpperCamelCase :Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
UpperCamelCase :Union[str, Any] = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
UpperCamelCase :List[str] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Optional[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
UpperCamelCase :Optional[int] = pipe(
__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
| 38 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
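# --- Usage sketch (added; shown as comments since this module uses relative
# imports): a two-stage config that round-trips through to_dict().
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   config.hidden_size             -> 256 (aliased to d_model)
#   config.to_dict()["model_type"] -> "deformable_detr"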
| 357 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
                -dim:, :
            ]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[F"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[F"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[
            hidden_size : hidden_size * 2, :
        ]
        state_dict[F"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[F"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(F"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(F"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(F"jozhang97/{model_name}")
        processor.push_to_hub(F"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 206 | 0 |
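A quick, self-contained sanity check of the q/k/v un-fusing that the two read_in_*_q_k_v helpers above perform; the hidden size and tensors here are made up for illustration only:

import torch

hidden_size = 8  # hypothetical; real checkpoints use much larger sizes
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused q/k/v projection
q_w, k_w, v_w = in_proj_weight.split(hidden_size, dim=0)
# torch.split yields exactly the three slices taken manually above
assert torch.equal(q_w, in_proj_weight[:hidden_size, :])
assert torch.equal(k_w, in_proj_weight[hidden_size : hidden_size * 2, :])
assert torch.equal(v_w, in_proj_weight[-hidden_size:, :])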
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float , p: int ) -> float:
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main() -> None:
    n = int(input('enter the numbers of values: ' ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(0 )
    print('enter the values of parameters in a list: ' )
    x = list(map(int , input().split() ) )
    print('enter the values of corresponding parameters: ' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('enter the value to interpolate: ' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main() | 297 |
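For reference, a non-interactive variant of the same Newton forward-difference interpolation; the sample data below is made up (f(x) = x**2 sampled at 0..3) so the result can be checked by hand:

import math
def newton_forward(x: list, y0: list, value: float) -> float:
    # Build the forward-difference table column by column.
    n = len(x)
    diff = [list(y0)] + [[0.0] * n for _ in range(n - 1)]
    for i in range(1, n):
        for j in range(n - i):
            diff[i][j] = diff[i - 1][j + 1] - diff[i - 1][j]
    u = (value - x[0]) / (x[1] - x[0])
    total, u_term = diff[0][0], 1.0
    for i in range(1, n):
        u_term *= u - (i - 1)   # same product ucal() accumulates
        total += (u_term * diff[i][0]) / math.factorial(i)
    return total
print(newton_forward([0, 1, 2, 3], [0, 1, 4, 9], 1.5))  # 2.25, since f(x) = x**2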
'''simple docstring'''
def equation(x: float ) -> float:
    return 10 - x * x
def bisection(a: float , b: float ) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a ) * equation(b ) >= 0:
        raise ValueError('Wrong space!' )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 297 | 1 |
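A generalized sketch of the same method, taking the function and tolerance as parameters instead of hard-coding them:

def bisection_any(f, a: float, b: float, tol: float = 1e-6) -> float:
    # Bolzano: a sign change on [a, b] guarantees a root in between.
    if f(a) * f(b) >= 0:
        raise ValueError('f(a) and f(b) must have opposite signs')
    while (b - a) / 2 > tol:
        c = (a + b) / 2
        if f(c) == 0:
            return c
        if f(a) * f(c) < 0:
            b = c
        else:
            a = c
    return (a + b) / 2
print(bisection_any(lambda x: 10 - x * x, 0, 6))  # ~3.16228, i.e. sqrt(10)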
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_lowercase : Dict = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCAmelCase__ ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , do_convert_rgb = True , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        """simple docstring"""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
        output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ):
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , do_convert_rgb = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name='size' , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 369 |
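The numerical core of the rescale/normalize steps implemented above, sketched with plain NumPy; the mean/std constants are the published OpenAI CLIP statistics, and the input image is random dummy data:

import numpy as np
OPENAI_CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
OPENAI_CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])
def preprocess_pixels(image: np.ndarray) -> np.ndarray:
    # image: HxWx3 uint8, already resized and center-cropped to 224x224
    pixels = image.astype(np.float32) * (1 / 255)           # rescale
    pixels = (pixels - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD  # normalize per channel
    return pixels.transpose(2, 0, 1)                        # HWC -> CHW
dummy = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
print(preprocess_pixels(dummy).shape)  # (3, 224, 224)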
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowercase : Any = logging.get_logger(__name__)
class lowerCAmelCase__ ( DonutImageProcessor ):
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs ) | 264 | 0 |
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = 'Create a default config file for Accelerate with only a few flags set.'
def write_basic_config(mixed_precision = "no" , save_location: str = default_json_config_file , use_xpu: bool = False ):
    """simple docstring"""
    path = Path(save_location )
    path.parent.mkdir(parents=True , exist_ok=True )
    if path.exists():
        print(
            f'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}" )
    config = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config['num_processes'] = num_gpus
        config['use_cpu'] = False
        if num_gpus > 1:
            config['distributed_type'] = 'MULTI_GPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config['num_processes'] = num_xpus
        config['use_cpu'] = False
        if num_xpus > 1:
            config['distributed_type'] = 'MULTI_XPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config['num_processes'] = num_npus
        config['use_cpu'] = False
        if num_npus > 1:
            config['distributed_type'] = 'MULTI_NPU'
        else:
            config['distributed_type'] = 'NO'
    else:
        num_gpus = 0
        config['use_cpu'] = True
        config['num_processes'] = 1
        config['distributed_type'] = 'NO'
    config = ClusterConfig(**config )
    config.to_json_file(path )
    return path
def default_command_parser(parser , parents ):
    """simple docstring"""
    parser = parser.add_parser("""default""" , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        """--config_file""" , default=None , help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ) , dest="""save_location""" , )
    parser.add_argument(
        """--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=str , help="""Whether or not to use mixed precision training. """
        """Choose between FP16 and BF16 (bfloat16) training. """
        """BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , )
    parser.set_defaults(func=default_config_command )
    return parser
def default_config_command(args ):
    """simple docstring"""
    config_file = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(f'accelerate configuration saved at {config_file}' )
| 147 |
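A condensed sketch of the device-detection branching above, reduced to the CUDA-only case for illustration:

import torch
def detect_distributed_type() -> tuple[int, str]:
    if torch.cuda.is_available():
        n = torch.cuda.device_count()
        return n, 'MULTI_GPU' if n > 1 else 'NO'
    return 1, 'NO'  # CPU fallback: a single process, no distribution
print(detect_distributed_type())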
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = """▁"""
__lowerCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
__lowerCAmelCase = {
"""vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
},
"""monolingual_vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
},
}
__lowerCAmelCase = {"""vinai/bartpho-syllable""": 1_0_2_4}
class UpperCAmelCase__ ( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self : str ,_a : str ,_a : Any ,_a : Any="<s>" ,_a : Dict="</s>" ,_a : int="</s>" ,_a : Union[str, Any]="<s>" ,_a : List[Any]="<unk>" ,_a : Optional[Any]="<pad>" ,_a : List[str]="<mask>" ,_a : Optional[Dict[str, Any]] = None ,**_a : int ,):
'''simple docstring'''
_a : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
_a : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
_a : Optional[int] = vocab_file
_a : Union[str, Any] = monolingual_vocab_file
_a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_a : Union[str, Any] = {}
_a : int = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_a ) not in self.fairseq_tokens_to_ids:
_a : int = cnt
cnt += 1
with open(_a ,'r' ,encoding='utf-8' ) as f:
for line in f.readlines():
_a : str = line.strip().split()[0]
_a : Tuple = len(self.fairseq_tokens_to_ids )
if str(_a ) not in self.fairseq_tokens_to_ids:
_a : List[str] = len(self.fairseq_tokens_to_ids )
_a : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Union[str, Any] ):
'''simple docstring'''
_a : int = self.__dict__.copy()
_a : str = None
_a : Optional[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple ,_a : Tuple ):
'''simple docstring'''
_a : Tuple = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_a : List[str] = {}
_a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowercase ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Dict = [self.cls_token_id]
_a : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowercase ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def __lowercase ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : List[str] = [self.sep_token_id]
_a : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowercase ( self : Dict ):
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : List[str] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowercase ( self : Tuple ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def __lowercase ( self : Union[str, Any] ,_a : Union[str, Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __lowercase ( self : Any ,_a : int ):
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __lowercase ( self : Tuple ,_a : Union[str, Any] ):
'''simple docstring'''
_a : str = ''.join(_a ).replace(_a ,' ' ).strip()
return out_string
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : int = os.path.join(
_a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_a : int = os.path.join(
_a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] ,)
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,'wb' ) as fi:
_a : List[Any] = self.sp_model.serialized_model_proto()
fi.write(_a )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_a ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file ,_a )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_a ,'w' ,encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F"""{str(_a )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
| 271 | 0 |
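The core of the reduced-vocabulary bookkeeping above, in miniature with toy tokens: special tokens are registered first, monolingual dictionary entries follow, and unknown tokens fall back to <unk>:

special = ['<s>', '<pad>', '</s>', '<unk>']
monolingual = ['▁xin', '▁chào', '▁Việt', '▁Nam']  # made-up dictionary entries
token_to_id: dict = {}
for tok in special + monolingual:
    if tok not in token_to_id:
        token_to_id[tok] = len(token_to_id)
id_to_token = {v: k for k, v in token_to_id.items()}
unk_id = token_to_id['<unk>']
print(token_to_id.get('▁chào', unk_id))     # 5: a known token
print(token_to_id.get('▁missing', unk_id))  # 3: falls back to <unk>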
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args , **kwargs ):
            pass
@is_pipeline_test
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self , model , tokenizer , processor ):
        vqa_pipeline = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
    def run_pipeline_test(self , vqa_pipeline , examples ):
        outputs = vqa_pipeline(examples , top_k=1)
        self.assertEqual(
            outputs , [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ] , )
@require_torch
    def test_small_model_pt(self ):
        vqa_pipeline = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image , question="How many cats are there?" , top_k=2)
        self.assertEqual(
            outputs , [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])
        outputs = vqa_pipeline({"image": image, "question": question} , top_k=2)
        self.assertEqual(
            outputs , [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])
@slow
@require_torch
    def test_large_model_pt(self ):
        vqa_pipeline = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image , question=question , top_k=2)
        self.assertEqual(
            nested_simplify(outputs , decimals=4) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}])
        outputs = vqa_pipeline({"image": image, "question": question} , top_k=2)
        self.assertEqual(
            nested_simplify(outputs , decimals=4) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}])
        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2)
        self.assertEqual(
            nested_simplify(outputs , decimals=4) , [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self ):
pass
| 354 |
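The same pipeline used outside the test harness; the task string and checkpoint come from the slow test above, while the printed scores should be treated as illustrative:

from transformers import pipeline
vqa = pipeline('visual-question-answering', model='dandelin/vilt-b32-finetuned-vqa')
result = vqa(
    image='http://images.cocodataset.org/val2017/000000039769.jpg',
    question='How many cats are there?',
    top_k=2,
)
print(result)  # roughly [{'score': 0.88, 'answer': '2'}, {'score': 0.30, 'answer': '1'}]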
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
    """simple docstring"""
    # Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata["model_config"] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location="cpu" )
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path )
    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>" , lstrip=False , rstrip=False )
    entity_token_2 = AddedToken("<ent2>" , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]} )
    config.vocab_size += 2
    print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
        json.dump(entity_vocab , f )
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
    enta_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, enta_emb] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"""encoder.layer.{layer_index}.attention.self."""
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]
    model = LukeModel(config=config ).eval()
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"""Missing keys {", ".join(missing_keys )}. Expected only missing embeddings.position_ids""" )
    if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
        raise ValueError(
            "Unexpected keys"
            f""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task="entity_classification" )
    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors="pt" )
    outputs = model(**encoding )
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1_024) )
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
    else:  # base
        expected_shape = torch.Size((1, 42, 768) )
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1_024) )
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]] )
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            f""" {expected_shape}""" )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_entity_vocab(entity_vocab_path ):
    """simple docstring"""
    entity_vocab = {}
    with open(entity_vocab_path , "r" , encoding="utf-8" ) as f:
        for index, line in enumerate(f ):
            title , _ = line.rstrip().split("\t" )
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 57 | 0 |
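The embedding-matrix surgery performed above, isolated with made-up sizes: two existing rows are copied and appended so the new <ent>/<ent2> tokens start from sensible vectors:

import torch
vocab_size, dim = 10, 4  # hypothetical sizes
word_emb = torch.randn(vocab_size, dim)
ent_emb = word_emb[3].unsqueeze(0)   # reuse an existing row, as the script reuses '@'
ent2_emb = word_emb[5].unsqueeze(0)  # ...and '#'
extended = torch.cat([word_emb, ent_emb, ent2_emb])
print(extended.shape)  # torch.Size([12, 4]): vocab grew by the two new tokens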
import os
def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , '''triangle.txt''' )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(''' ''' ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )
if __name__ == "__main__":
print(solution())
| 49 |
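The same maximum-path-sum dynamic program on an in-memory triangle, so the recurrence can be checked without the triangle.txt input file:

def max_path_sum(triangle: list) -> int:
    a = [row[:] for row in triangle]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            right = a[i - 1][j] if j != len(a[i - 1]) else 0
            left = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(right, left)
    return max(a[-1])
# Classic example: the best path 3 -> 7 -> 4 -> 9 sums to 23.
print(max_path_sum([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]))  # 23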
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
lowerCAmelCase : Any = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
lowerCAmelCase : Any = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
lowerCAmelCase : Any = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        """simple docstring"""
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 291 | 0 |
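Recall computed by hand for the first docstring example above, confirming the TP / (TP + FN) definition:

references = [0, 0, 1, 1, 1]
predictions = [0, 1, 0, 1, 1]
tp = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 1)  # 2
fn = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 0)  # 1
print(tp / (tp + fn))  # 0.6666..., matching the metric's output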
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __UpperCamelCase ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = AutoencoderKL
    main_input_name = 'sample'
    base_precision = 1E-2
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =4
lowerCamelCase_ =3
lowerCamelCase_ =(32, 32)
lowerCamelCase_ =floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase )
return {"sample": image}
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
lowerCamelCase_ =self.dummy_input
return init_dict, inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skipIf(torch_device == '''mps''', '''Gradient checkpointing skipped on MPS''' )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =self.prepare_init_args_and_inputs_for_common()
lowerCamelCase_ =self.model_class(**lowerCAmelCase )
model.to(lowerCAmelCase )
assert not model.is_gradient_checkpointing and model.training
lowerCamelCase_ =model(**lowerCAmelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
lowerCamelCase_ =torch.randn_like(lowerCAmelCase )
lowerCamelCase_ =(out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
lowerCamelCase_ =self.model_class(**lowerCAmelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowerCAmelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
lowerCamelCase_ =model_a(**lowerCAmelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
lowerCamelCase_ =(out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
lowerCamelCase_ =dict(model.named_parameters() )
lowerCamelCase_ =dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data, named_params_a[name].grad.data, atol=5e-5 ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''', output_loading_info=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ), 0 )
model.to(lowerCAmelCase )
lowerCamelCase_ =model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
lowerCamelCase_ =model.to(lowerCAmelCase )
model.eval()
if torch_device == "mps":
lowerCamelCase_ =torch.manual_seed(0 )
else:
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
lowerCamelCase_ =torch.randn(
1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0 ), )
lowerCamelCase_ =image.to(lowerCAmelCase )
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase, sample_posterior=lowerCAmelCase, generator=lowerCAmelCase ).sample
lowerCamelCase_ =output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
lowerCamelCase_ =torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
] )
elif torch_device == "cpu":
lowerCamelCase_ =torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
lowerCamelCase_ =torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(lowerCAmelCase, lowerCAmelCase, rtol=1e-2 ) )
@slow
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(lowerCAmelCase ) for s in shape] )}.npy'''
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self, lowerCAmelCase=0, lowerCAmelCase=(4, 3, 512, 512), lowerCAmelCase=False ):
"""simple docstring"""
lowerCamelCase_ =torch.floataa if fpaa else torch.floataa
lowerCamelCase_ =torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCAmelCase, lowerCAmelCase ) ) ).to(lowerCAmelCase ).to(lowerCAmelCase )
return image
def lowercase__ ( self, lowerCAmelCase="CompVis/stable-diffusion-v1-4", lowerCAmelCase=False ):
"""simple docstring"""
lowerCamelCase_ ='''fp16''' if fpaa else None
lowerCamelCase_ =torch.floataa if fpaa else torch.floataa
lowerCamelCase_ =AutoencoderKL.from_pretrained(
lowerCAmelCase, subfolder='''vae''', torch_dtype=lowerCAmelCase, revision=lowerCAmelCase, )
model.to(lowerCAmelCase ).eval()
return model
def lowercase__ ( self, lowerCAmelCase=0 ):
"""simple docstring"""
if torch_device == "mps":
return torch.manual_seed(lowerCAmelCase )
return torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.get_sd_vae_model()
lowerCamelCase_ =self.get_sd_image(lowerCAmelCase )
lowerCamelCase_ =self.get_generator(lowerCAmelCase )
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase, generator=lowerCAmelCase, sample_posterior=lowerCAmelCase ).sample
assert sample.shape == image.shape
lowerCamelCase_ =sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowerCamelCase_ =torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(lowerCAmelCase, lowerCAmelCase, atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.get_sd_vae_model(fpaa=lowerCAmelCase )
lowerCamelCase_ =self.get_sd_image(lowerCAmelCase, fpaa=lowerCAmelCase )
lowerCamelCase_ =self.get_generator(lowerCAmelCase )
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase, generator=lowerCAmelCase, sample_posterior=lowerCAmelCase ).sample
assert sample.shape == image.shape
lowerCamelCase_ =sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowerCamelCase_ =torch.tensor(lowerCAmelCase )
assert torch_all_close(lowerCAmelCase, lowerCAmelCase, atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.get_sd_vae_model()
lowerCamelCase_ =self.get_sd_image(lowerCAmelCase )
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase ).sample
assert sample.shape == image.shape
lowerCamelCase_ =sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowerCamelCase_ =torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(lowerCAmelCase, lowerCAmelCase, atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.get_sd_vae_model()
lowerCamelCase_ =self.get_sd_image(lowerCAmelCase, shape=(3, 4, 64, 64) )
with torch.no_grad():
lowerCamelCase_ =model.decode(lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowerCamelCase_ =sample[-1, -2:, :2, -2:].flatten().cpu()
lowerCamelCase_ =torch.tensor(lowerCAmelCase )
assert torch_all_close(lowerCAmelCase, lowerCAmelCase, atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.get_sd_vae_model(fpaa=lowerCAmelCase )
lowerCamelCase_ =self.get_sd_image(lowerCAmelCase, shape=(3, 4, 64, 64), fpaa=lowerCAmelCase )
with torch.no_grad():
lowerCamelCase_ =model.decode(lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowerCamelCase_ =sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowerCamelCase_ =torch.tensor(lowerCAmelCase )
assert torch_all_close(lowerCAmelCase, lowerCAmelCase, atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available(), reason='''xformers is not required when using PyTorch 2.0.''' )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.get_sd_vae_model(fpaa=lowerCAmelCase )
lowerCamelCase_ =self.get_sd_image(lowerCAmelCase, shape=(3, 4, 64, 64), fpaa=lowerCAmelCase )
with torch.no_grad():
lowerCamelCase_ =model.decode(lowerCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowerCamelCase_ =model.decode(lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCAmelCase, lowerCAmelCase, atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available(), reason='''xformers is not required when using PyTorch 2.0.''' )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.get_sd_vae_model()
lowerCamelCase_ =self.get_sd_image(lowerCAmelCase, shape=(3, 4, 64, 64) )
with torch.no_grad():
lowerCamelCase_ =model.decode(lowerCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowerCamelCase_ =model.decode(lowerCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCAmelCase, lowerCAmelCase, atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.get_sd_vae_model()
lowerCamelCase_ =self.get_sd_image(lowerCAmelCase )
lowerCamelCase_ =self.get_generator(lowerCAmelCase )
with torch.no_grad():
lowerCamelCase_ =model.encode(lowerCAmelCase ).latent_dist
lowerCamelCase_ =dist.sample(generator=lowerCAmelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
lowerCamelCase_ =sample[0, -1, -3:, -3:].flatten().cpu()
lowerCamelCase_ =torch.tensor(lowerCAmelCase )
lowerCamelCase_ =3e-3 if torch_device != '''mps''' else 1e-2
assert torch_all_close(lowerCAmelCase, lowerCAmelCase, atol=lowerCAmelCase )
| 6 |
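A minimal encode-sample-decode round trip using the same dummy configuration the fast tests above build; this checks shapes only, and no pretrained weights are involved:

import torch
from diffusers import AutoencoderKL
vae = AutoencoderKL(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
    up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
    latent_channels=4,
)
image = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample(generator=torch.manual_seed(0))
    recon = vae.decode(latents).sample
print(latents.shape, recon.shape)  # (1, 4, 16, 16) and (1, 3, 32, 32)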
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing( search_prob , find_max: bool = True , max_x: float = math.inf , min_x: float = -math.inf , max_y: float = math.inf , min_y: float = -math.inf , visualization: bool = False , start_temperate: float = 100 , rate_of_decrease: float = 0.01 , threshold_temp: float = 1 , ) -> Any:
    """simple docstring"""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
        plt.xlabel('''Iterations''' )
        plt.ylabel('''Function values''' )
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_fa(x , y ) -> float:
        """simple docstring"""
        return (x**2) + (y**2)
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )
    def test_fa(x , y ) -> float:
        """simple docstring"""
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        """The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
        f"""{local_min.score()}"""
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        """The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
        f"""{local_min.score()}"""
    )
| 6 | 1 |
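The Metropolis acceptance rule at the heart of the loop above: a move that worsens the score is accepted with probability e**(change / T), which vanishes as the temperature decays:

import math
for temp in (100, 10, 1):
    p = math.e ** (-5 / temp)  # a move that worsens the score by 5
    print(f'T={temp:>3}: accept with p={p:.3f}')  # 0.951, 0.607, 0.007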
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _snake_case ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self ):
        return 32
    @property
    def time_input_dim(self ):
        return 32
    @property
    def block_out_channels_a(self ):
        return self.time_input_dim
    @property
    def time_embed_dim(self ):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self ):
        return 100
@property
    def dummy_unet(self ):
        torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__lowerCamelCase : List[Any] = UNetaDConditionModel(**UpperCAmelCase )
return model
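
    # The doubled ``out_channels`` above means the UNet predicts a per-pixel mean
    # and variance in one tensor. A minimal sketch of how such an output is
    # typically split (illustrative; ``model_output`` is a hypothetical name):
    #
    #   noise_pred, variance_pred = model_output.chunk(2, dim=1)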
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"

        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
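        # The two lines above convert an HxWxC uint8 image into the 1xCxHxW float
        # tensor in [0, 1] the pipeline expects: divide by 255, move the channel
        # axis first with permute(2, 0, 1), then add a batch axis with unsqueeze(0).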

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)

| 135 |
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dataset(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
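
# Takeaway from the tests above (illustrative): ``Dataset.from_list`` infers the
# schema from the first record and pads later records with ``None`` for missing
# columns, except that an empty list value defers type inference to a later
# non-empty record.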
| 242 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """
    Print the first-order entropy, the second-order entropy, and their
    difference (all in bits, rounded) for the given text.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert text input into two dicts of counts.
    The first dictionary stores the frequency of single character strings.
    The second dictionary stores the frequency of two character strings.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
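
# Worked example (illustrative, added here): for text = "abb", analyze_text
# returns single counts {'b': 2, 'a': 1} and pair counts
# {' a': 1, 'ab': 1, 'bb': 1}; the first-order entropy is then
# -(1/3 * log2(1/3) + 2/3 * log2(2/3)) ~= 0.918 bits.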
def main():
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 133 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
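
# Example of the marker these regexes target (illustrative):
#
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
#
# `_re_copy_warning` captures the indent, the fully qualified source object, and
# any trailing `old->new` replacement patterns; `_re_replace_pattern` then splits
# each pair, with an optional option such as `all-casing`.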
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """
    Applies the black part of our `make style` command to `code`.
    """
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
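
    # Typical invocations (illustrative):
    #   python utils/check_copies.py                      # report inconsistencies
    #   python utils/check_copies.py --fix_and_overwrite  # rewrite the copies in place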
| 133 | 1 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
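
# Example invocation (script filename and paths are hypothetical):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel/model.ckpt \
#       --config_file ./funnel/config.json \
#       --pytorch_dump_path ./funnel/pytorch_model.bin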
| 333 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
        # Model for Image classification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
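
# Usage sketch (illustrative): the auto classes above resolve a checkpoint's
# config to the matching Flax model class, e.g.
#
#   from transformers import FlaxAutoModel
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")  # -> FlaxBertModel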
| 333 | 1 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should'
            f" be {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
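
# Minimal illustration of the dotted-key traversal used above (names hypothetical):
#
#   pointer = hf_model
#   for attribute in "encoder.layers.0.attention.k_proj".split("."):
#       pointer = getattr(pointer, attribute)
#   # `pointer` is now the k_proj module, ready for a weight assignment.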
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
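
    # Note: `--not_finetuned` is inverted into `is_finetuned` above, so omitting the
    # flag converts a fine-tuned (CTC) checkpoint. Illustrative invocation (script
    # filename and paths are hypothetical):
    #   python convert_unispeech_sat_checkpoint.py \
    #       --checkpoint_path ./unispeech_sat.pt --pytorch_dump_folder_path ./out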
| 117 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
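
# Usage sketch (illustrative; checkpoint id and variable names hypothetical):
#
#   processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#   batch = processor(audio=waveform, sampling_rate=16000, text="transcript")
#   # `batch` holds the extractor's input_features plus tokenized `labels`.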
| 117 | 1 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Sum all values of a binary tree via depth-first search; iterable."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
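
    # Usage sketch (illustrative): total of 10 + 5 + (-3) is 12.
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    print(next(iter(BinaryTreeNodeSum(root))))  # 12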
| 228 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    Return every combination of substrings from `word_bank` that
    concatenates to `target`.
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
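
# Worked example (illustrative): all_construct("ab", ["a", "b", "ab"]) seeds
# table[0] = [[]]; matching "a" at i=0 puts ["a"] into table[1] and "ab" puts
# ["ab"] into table[2]; matching "b" at i=1 extends ["a"] to ["b", "a"], which
# is reversed at the end, giving [["ab"], ["a", "b"]].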
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 228 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
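
# Note (illustrative): the "x4" in the checkpoint name is the spatial upscale
# factor, which is why the tests above feed 128x128 inputs and assert
# (1, 512, 512, 3) outputs.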
| 367 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def lowerCAmelCase ( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> str:
"""simple docstring"""
snake_case : Any = VideoMAEForPreTraining(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
snake_case : int = torch.ones((self.num_masks,) )
snake_case : List[str] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
snake_case : Tuple = mask.expand(self.batch_size , -1 ).bool()
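        # e.g. with seq_length=25 and num_masks=22 this yields a (batch_size, 25)
        # boolean tensor whose first 22 positions are True for every example.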
snake_case : str = model(UpperCamelCase__ , UpperCamelCase__ )
# model only returns predictions for masked patches
snake_case : Tuple = mask.sum().item()
snake_case : Dict = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
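        # sanity check on the arithmetic (illustrative): with tubelet_size=2 and
        # patch_size=2, each masked patch is reconstructed as 3 * 2 * 2**2 = 24 values.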
def lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
snake_case : Tuple = self.prepare_config_and_inputs()
snake_case ,snake_case ,snake_case : Optional[int] = config_and_inputs
snake_case : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCamelCase = (
{"""feature-extraction""": VideoMAEModel, """video-classification""": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
snake_case : List[Any] = VideoMAEModelTester(self )
snake_case : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def lowerCAmelCase ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int=False ) -> Optional[Any]:
"""simple docstring"""
snake_case : Optional[Any] = copy.deepcopy(UpperCamelCase__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
snake_case : Optional[int] = torch.ones((self.model_tester.num_masks,) )
snake_case : int = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
snake_case : Dict = mask.expand(self.model_tester.batch_size , -1 ).bool()
snake_case : Optional[int] = bool_masked_pos.to(UpperCamelCase__ )
if return_labels:
if model_class in [
*get_values(UpperCamelCase__ ),
]:
snake_case : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
return inputs_dict
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
snake_case ,snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Union[str, Any] = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
snake_case ,snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : int = model_class(UpperCamelCase__ )
snake_case : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : str = [*signature.parameters.keys()]
snake_case : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
@slow
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : int = VideoMAEModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
if not self.has_attentions:
pass
else:
snake_case ,snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Optional[int] = True
for model_class in self.all_model_classes:
snake_case : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
snake_case : List[Any] = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
snake_case : Dict = True
snake_case : List[str] = False
snake_case : Tuple = True
snake_case : List[str] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
snake_case : List[Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
snake_case : List[Any] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case : Any = True
snake_case : Any = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
snake_case : Dict = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
snake_case : int = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
snake_case : Any = len(UpperCamelCase__ )
# Check attention is always last and order is fine
snake_case : Union[str, Any] = True
snake_case : Union[str, Any] = True
snake_case : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
snake_case : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 1 , len(UpperCamelCase__ ) )
snake_case : Tuple = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] ):
snake_case : Union[str, Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
snake_case : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
snake_case : Union[str, Any] = outputs.hidden_states
snake_case : Optional[int] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
snake_case : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks
snake_case : int = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
snake_case ,snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case : List[str] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def _UpperCamelCase ( ) -> str:
'''simple docstring'''
snake_case : int = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
snake_case : str = np.load(SCREAMING_SNAKE_CASE__ )
return list(SCREAMING_SNAKE_CASE__ )
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
snake_case : Tuple = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
UpperCamelCase__ )
snake_case : str = self.default_image_processor
snake_case : Dict = prepare_video()
snake_case : int = image_processor(UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
snake_case : int = model(**UpperCamelCase__ )
# verify the logits
snake_case : Optional[int] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
snake_case : Optional[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
snake_case : List[str] = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(UpperCamelCase__ )
snake_case : str = self.default_image_processor
snake_case : Tuple = prepare_video()
snake_case : List[Any] = image_processor(UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# add boolean mask, indicating which patches to mask
snake_case : str = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
snake_case : Dict = torch.load(UpperCamelCase__ )
# forward pass
with torch.no_grad():
snake_case : Tuple = model(**UpperCamelCase__ )
# verify the logits
snake_case : str = torch.Size([1, 1408, 1536] )
snake_case : List[str] = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=UpperCamelCase__ )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
snake_case : Any = torch.tensor([0.5_142] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase__ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
snake_case : str = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=UpperCamelCase__ ).to(
UpperCamelCase__ )
with torch.no_grad():
snake_case : Optional[int] = model(**UpperCamelCase__ )
        snake_case : str = torch.tensor([0.6_469] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase__ , atol=1e-4 ) )
| 83 | 0 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = ["""input_values""", """attention_mask"""]
def __init__( self : Union[str, Any], __A : int = 1, __A : int = 1_6_0_0_0, __A : float = 0.0, __A : bool = False, __A : int = 8_0, __A : int = 1_6, __A : int = 6_4, __A : str = "hann_window", __A : float = 1.0, __A : float = 8_0, __A : float = 7_6_0_0, __A : float = 1E-10, __A : int = 2, __A : bool = True, **__A : Dict, ):
super().__init__(feature_size=__A, sampling_rate=__A, padding_value=__A, **__A )
UpperCAmelCase : Optional[Any] = do_normalize
UpperCAmelCase : Dict = return_attention_mask
UpperCAmelCase : Union[str, Any] = num_mel_bins
UpperCAmelCase : str = hop_length
UpperCAmelCase : List[str] = win_length
UpperCAmelCase : Optional[Any] = win_function
UpperCAmelCase : Dict = frame_signal_scale
UpperCAmelCase : List[Any] = fmin
UpperCAmelCase : Any = fmax
UpperCAmelCase : str = mel_floor
UpperCAmelCase : str = reduction_factor
UpperCAmelCase : Union[str, Any] = win_length * sampling_rate // 1_0_0_0
UpperCAmelCase : int = hop_length * sampling_rate // 1_0_0_0
UpperCAmelCase : int = optimal_fft_length(self.sample_size )
UpperCAmelCase : int = (self.n_fft // 2) + 1
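        # illustrative numbers, assuming the defaults above: win_length=64 ms at
        # sampling_rate=16000 gives sample_size = 64 * 16000 // 1000 = 1024 samples;
        # optimal_fft_length rounds up to the next power of two, so n_fft = 1024 and
        # n_freqs = 1024 // 2 + 1 = 513 frequency bins.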
UpperCAmelCase : Tuple = window_function(window_length=self.sample_size, name=self.win_function, periodic=__A )
UpperCAmelCase : Tuple = mel_filter_bank(
num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm='''slaney''', mel_scale='''slaney''', )
if frame_signal_scale != 1.0:
warnings.warn(
'''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''', __A, )
if reduction_factor != 2.0:
warnings.warn(
'''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''', __A, )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __magic_name__ ( __A : List[np.ndarray], __A : List[np.ndarray], __A : float = 0.0 ):
if attention_mask is not None:
UpperCAmelCase : List[str] = np.array(__A, np.intaa )
UpperCAmelCase : List[Any] = []
for vector, length in zip(__A, attention_mask.sum(-1 ) ):
UpperCAmelCase : Optional[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
UpperCAmelCase : Tuple = padding_value
normed_input_values.append(__A )
else:
UpperCAmelCase : Any = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
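        # e.g. normalizing x = [1.0, 2.0, 3.0] (mean 2, population std ~0.8165) gives
        # roughly [-1.2247, 0.0, 1.2247]; positions beyond `length` are reset to
        # `padding_value` (illustrative numbers, not taken from the test suite).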
def __magic_name__ ( self : List[str], __A : np.ndarray, ):
UpperCAmelCase : List[str] = spectrogram(
__A, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel='''log10''', )
return log_mel_spec.T
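        # note: `spectrogram` returns (num_mel_filters, num_frames), so the transpose
        # above yields (num_frames, num_mel_bins), e.g. (T, 80) with the defaults.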
def __call__( self : List[str], __A : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None, __A : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None, __A : Union[bool, str, PaddingStrategy] = False, __A : Optional[int] = None, __A : bool = False, __A : Optional[int] = None, __A : Optional[bool] = None, __A : Optional[Union[str, TensorType]] = None, __A : Optional[int] = None, **__A : int, ):
if audio is None and audio_target is None:
raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if audio is not None:
UpperCAmelCase : Union[str, Any] = self._process_audio(
__A, __A, __A, __A, __A, __A, __A, __A, **__A, )
else:
UpperCAmelCase : Optional[int] = None
if audio_target is not None:
UpperCAmelCase : Any = self._process_audio(
__A, __A, __A, __A, __A, __A, __A, __A, **__A, )
if inputs is None:
return inputs_target
else:
UpperCAmelCase : Optional[int] = inputs_target['''input_values''']
UpperCAmelCase : Dict = inputs_target.get('''attention_mask''' )
if decoder_attention_mask is not None:
UpperCAmelCase : str = decoder_attention_mask
return inputs
def __magic_name__ ( self : Optional[Any], __A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], __A : bool = False, __A : Union[bool, str, PaddingStrategy] = False, __A : Optional[int] = None, __A : bool = False, __A : Optional[int] = None, __A : Optional[bool] = None, __A : Optional[Union[str, TensorType]] = None, **__A : Tuple, ):
UpperCAmelCase : Union[str, Any] = isinstance(__A, np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
UpperCAmelCase : Union[str, Any] = is_batched_numpy or (
isinstance(__A, (list, tuple) ) and (isinstance(speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase : Tuple = [np.asarray(__A, dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(__A, np.ndarray ):
UpperCAmelCase : Tuple = np.asarray(__A, dtype=np.floataa )
elif isinstance(__A, np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase : List[Any] = speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase : int = [speech]
# needed to make pad() work on spectrogram inputs
UpperCAmelCase : Tuple = self.feature_size
# convert into correct format for padding
if is_target:
UpperCAmelCase : Dict = [self._extract_mel_features(__A ) for waveform in speech]
UpperCAmelCase : Optional[Any] = BatchFeature({'''input_values''': features} )
UpperCAmelCase : Tuple = self.num_mel_bins
else:
UpperCAmelCase : Optional[Any] = BatchFeature({'''input_values''': speech} )
UpperCAmelCase : List[str] = self.pad(
__A, padding=__A, max_length=__A, truncation=__A, pad_to_multiple_of=__A, return_attention_mask=__A, **__A, )
UpperCAmelCase : List[Any] = feature_size_hack
# convert input values to correct format
UpperCAmelCase : Optional[int] = padded_inputs['''input_values''']
if not isinstance(input_values[0], np.ndarray ):
UpperCAmelCase : List[Any] = [np.asarray(__A, dtype=np.floataa ) for array in input_values]
elif (
not isinstance(__A, np.ndarray )
and isinstance(input_values[0], np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
UpperCAmelCase : str = [array.astype(np.floataa ) for array in input_values]
elif isinstance(__A, np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
UpperCAmelCase : Dict = input_values.astype(np.floataa )
# convert attention_mask to correct format
UpperCAmelCase : List[str] = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
UpperCAmelCase : str = [np.asarray(__A, dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
UpperCAmelCase : int = (
attention_mask
if self._get_padding_strategies(__A, max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
else None
)
UpperCAmelCase : Any = self.zero_mean_unit_var_norm(
padded_inputs['''input_values'''], attention_mask=__A, padding_value=self.padding_value )
if return_tensors is not None:
UpperCAmelCase : Optional[Any] = padded_inputs.convert_to_tensors(__A )
return padded_inputs
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : Any = super().to_dict()
# Don't serialize these as they are derived from the other properties.
UpperCAmelCase : int = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs''']
for name in names:
if name in output:
del output[name]
return output
| 336 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipeline_utils import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
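# The message above points at the supported import path, i.e. (illustrative):
# from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput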
| 336 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase_ : int = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[Any] = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
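    # note (illustrative): with _LazyModule, the submodules listed in _import_structure
    # are only imported the first time one of their attributes is accessed.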
| 361 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __a , unittest.TestCase ):
snake_case : Union[str, Any] = KandinskyVaaControlnetPipeline
snake_case : Dict = ["""image_embeds""", """negative_image_embeds""", """hint"""]
snake_case : str = ["""image_embeds""", """negative_image_embeds""", """hint"""]
snake_case : Optional[int] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
snake_case : str = False
@property
def snake_case_ (self ):
return 3_2
@property
def snake_case_ (self ):
return 3_2
@property
def snake_case_ (self ):
return self.time_input_dim
@property
def snake_case_ (self ):
return self.time_input_dim * 4
@property
def snake_case_ (self ):
return 1_0_0
@property
def snake_case_ (self ):
torch.manual_seed(0 )
_UpperCAmelCase : str = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_UpperCAmelCase : Union[str, Any] = UNetaDConditionModel(**lowerCAmelCase__ )
return model
@property
def snake_case_ (self ):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def snake_case_ (self ):
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ (self ):
_UpperCAmelCase : List[Any] = self.dummy_unet
_UpperCAmelCase : str = self.dummy_movq
_UpperCAmelCase : Any = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=lowerCAmelCase__ , )
_UpperCAmelCase : List[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__=0 ):
_UpperCAmelCase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCAmelCase__ )
# create hint
_UpperCAmelCase : List[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
if str(lowerCAmelCase__ ).startswith("""mps""" ):
_UpperCAmelCase : int = torch.manual_seed(lowerCAmelCase__ )
else:
_UpperCAmelCase : Tuple = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def snake_case_ (self ):
_UpperCAmelCase : Union[str, Any] = """cpu"""
_UpperCAmelCase : List[str] = self.get_dummy_components()
_UpperCAmelCase : str = self.pipeline_class(**lowerCAmelCase__ )
_UpperCAmelCase : int = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Any = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
_UpperCAmelCase : str = output.images
_UpperCAmelCase : Optional[int] = pipe(
**self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0]
_UpperCAmelCase : int = image[0, -3:, -3:, -1]
_UpperCAmelCase : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_UpperCAmelCase : Union[str, Any] = np.array(
[0.6_9_5_9_8_2_6, 0.8_6_8_2_7_9, 0.7_5_5_8_0_9_2, 0.6_8_7_6_9_4_6_7, 0.8_5_8_0_5_8_0_4, 0.6_5_9_7_7_4_9_6, 0.4_4_8_8_5_3_0_2, 0.5_9_5_9_1_1_1, 0.4_2_5_1_5_9_5] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def snake_case_ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ (self ):
_UpperCAmelCase : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
_UpperCAmelCase : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
_UpperCAmelCase : Union[str, Any] = torch.from_numpy(np.array(lowerCAmelCase__ ) ).float() / 2_5_5.0
_UpperCAmelCase : Union[str, Any] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
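        # shape walkthrough (illustrative): (H, W, 3) uint8 image -> float in [0, 1]
        # -> permute to (3, H, W) -> unsqueeze to a (1, 3, H, W) batch tensor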
_UpperCAmelCase : Union[str, Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCAmelCase__ )
_UpperCAmelCase : str = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
_UpperCAmelCase : int = pipeline.to(lowerCAmelCase__ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : str = """A robot, 4k photo"""
_UpperCAmelCase : Dict = torch.Generator(device="""cuda""" ).manual_seed(0 )
_UpperCAmelCase , _UpperCAmelCase : Tuple = pipe_prior(
lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_UpperCAmelCase : Dict = torch.Generator(device="""cuda""" ).manual_seed(0 )
_UpperCAmelCase : Tuple = pipeline(
image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , hint=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=1_0_0 , output_type="""np""" , )
_UpperCAmelCase : str = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
| 170 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase ( metaclass=_UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = ['speech']
def __init__( self , *lowercase , **lowercase ) -> str:
requires_backends(self , ["""speech"""] )
class lowercase ( metaclass=_UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = ['speech']
def __init__( self , *lowercase , **lowercase ) -> Any:
requires_backends(self , ["""speech"""] )
| 46 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , **lowercase ):
super().__init__(**lowercase )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
# No specific FOR_XXX available yet
def __call__(self , lowercase , **lowercase ):
return super().__call__(lowercase , **lowercase )
def _a (self , **lowercase ):
A_ : Tuple = {}
if "candidate_labels" in kwargs:
A_ : Dict = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
A_ : Optional[Any] = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def _a (self , lowercase , lowercase=None , lowercase="This is a sound of {}." ):
if isinstance(lowercase , lowercase ):
if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
A_ : Dict = requests.get(lowercase ).content
else:
with open(lowercase , """rb""" ) as f:
A_ : List[str] = f.read()
if isinstance(lowercase , lowercase ):
A_ : List[Any] = ffmpeg_read(lowercase , self.feature_extractor.sampling_rate )
if not isinstance(lowercase , np.ndarray ):
raise ValueError("""We expect a numpy ndarray as input""" )
if len(audio.shape ) != 1:
raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
A_ : int = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
A_ : List[Any] = candidate_labels
        A_ : str = [hypothesis_template.format(x ) for x in candidate_labels]
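        # e.g. with the default template "This is a sound of {}." and candidate label
        # "dog", the text passed to the tokenizer becomes "This is a sound of dog."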
A_ : Optional[Any] = self.tokenizer(lowercase , return_tensors=self.framework , padding=lowercase )
A_ : Optional[Any] = [text_inputs]
return inputs
def _a (self , lowercase ):
A_ : Union[str, Any] = model_inputs.pop("""candidate_labels""" )
A_ : List[Any] = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , lowercase ):
A_ : Union[str, Any] = text_inputs[0]
else:
# Batching case.
A_ : Optional[int] = text_inputs[0][0]
A_ : str = self.model(**lowercase , **lowercase )
A_ : Union[str, Any] = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def _a (self , lowercase ):
A_ : Union[str, Any] = model_outputs.pop("""candidate_labels""" )
A_ : List[Any] = model_outputs["""logits"""][0]
if self.framework == "pt":
A_ : Optional[Any] = logits.softmax(dim=0 )
A_ : str = probs.tolist()
else:
raise ValueError("""`tf` framework not supported.""" )
A_ : Optional[int] = [
{"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(lowercase , lowercase ) , key=lambda x : -x[0] )
        ]
        return result
 | 206 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
_UpperCAmelCase = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
_UpperCAmelCase = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase_ = RoFormerTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ):
"""simple docstring"""
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
A_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('lowercase' , lowercase ) != do_lower_case
or pre_tok_state.get('strip_accents' , lowercase ) != strip_accents
):
A_ : Union[str, Any] = getattr(lowercase , pre_tok_state.pop('type' ) )
A_ : Optional[int] = do_lower_case
A_ : List[Any] = strip_accents
A_ : List[str] = pre_tok_class(**lowercase )
A_ : int = do_lower_case
def __getstate__( self ):
"""simple docstring"""
A_ : Dict = self.__dict__.copy()
A_ : Optional[Any] = BertPreTokenizer()
return state
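        # note (illustrative): the custom Jieba pre-tokenizer is not picklable, so
        # __getstate__ swaps in a plain BertPreTokenizer and __setstate__ rebuilds it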
def __setstate__( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = d
A_ : Any = self.__dict__['_tokenizer'].get_vocab()
A_ : Any = PreTokenizer.custom(JiebaPreTokenizer(lowercase ) )
def lowerCAmelCase_ ( self , lowercase , lowercase=None ):
"""simple docstring"""
A_ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase_ ( self , lowercase , lowercase = None ):
"""simple docstring"""
A_ : Tuple = [self.sep_token_id]
A_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self , lowercase , lowercase = None ):
"""simple docstring"""
A_ : Tuple = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
def lowerCAmelCase_ ( self , lowercase , lowercase=None , lowercase=None , lowercase=False , **lowercase , ):
"""simple docstring"""
A_ : List[Any] = BertPreTokenizer()
return super().save_pretrained(lowercase , lowercase , lowercase , lowercase , **lowercase )
 | 368 |
_UpperCAmelCase = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def UpperCamelCase ( ):
'''simple docstring'''
A_ : Tuple = input('Enter message: ' )
A_ : int = input('Enter key [alphanumeric]: ' )
A_ : Optional[Any] = input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
A_ : List[Any] = 'encrypt'
A_ : int = encrypt_message(__lowercase ,__lowercase )
elif mode.lower().startswith('d' ):
A_ : Optional[Any] = 'decrypt'
A_ : Dict = decrypt_message(__lowercase ,__lowercase )
print(f'''\n{mode.title()}ed message:''' )
print(__lowercase )
def UpperCamelCase ( __lowercase : str ,__lowercase : str ):
'''simple docstring'''
return translate_message(__lowercase ,__lowercase ,'encrypt' )
def UpperCamelCase ( __lowercase : str ,__lowercase : str ):
'''simple docstring'''
return translate_message(__lowercase ,__lowercase ,'decrypt' )
def UpperCamelCase ( __lowercase : str ,__lowercase : str ,__lowercase : str ):
'''simple docstring'''
A_ : Tuple = []
A_ : str = 0
A_ : Optional[int] = key.upper()
for symbol in message:
A_ : Optional[Any] = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__lowercase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__lowercase ):
A_ : str = 0
else:
translated.append(__lowercase )
return "".join(__lowercase )
if __name__ == "__main__":
main()
| 192 | 0 |
import mpmath # for roots of unity
import numpy as np
class _snake_case :
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None ):
# Input as list
a :int = list(poly_a or [0] )[:]
a :List[Any] = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
a :Tuple = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
a :Any = len(self.polyB )
# Add 0 to make lengths equal a power of 2
a :Dict = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
a :Union[str, Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
a :Tuple = self.__multiply()
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :Optional[Any] = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(_lowerCamelCase ) <= 1:
return dft[0]
#
a :Dict = self.c_max_length // 2
while next_ncol > 0:
a :Union[str, Any] = [[] for i in range(_lowerCamelCase )]
a :Union[str, Any] = self.root**next_ncol
# First half of next step
a :str = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(_lowerCamelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
a :int = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(_lowerCamelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
a :Tuple = new_dft
a :Optional[Any] = next_ncol // 2
return dft[0]
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[int] = self.__dft('''A''' )
a :List[Any] = self.__dft('''B''' )
a :Dict = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
a :Dict = 2
while next_ncol <= self.c_max_length:
a :str = [[] for i in range(_lowerCamelCase )]
a :List[Any] = self.root ** (next_ncol // 2)
a :List[str] = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
a :int = new_inverse_c
next_ncol *= 2
# Unpack
a :Any = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self ):
        a :Dict = '''A = ''' + ''' + '''.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        a :Any = '''B = ''' + ''' + '''.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        a :Tuple = '''A*B = ''' + ''' + '''.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
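    # quick sanity example (illustrative, up to floating-point rounding):
    # _snake_case([1, 2], [3, 4]).product should recover the coefficients of
    # (1 + 2x) * (3 + 4x) = 3 + 10x + 8x^2, i.e. [(3+0j), (10+0j), (8+0j)].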
| 94 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _UpperCAmelCase ( lowerCAmelCase__):
def __init__( self : Optional[int] , lowercase_ : str , lowercase_ : int ):
snake_case_ : Dict = params
snake_case_ : Union[str, Any] = np.array(lowercase_ )
        snake_case_ : str = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : Dict , lowercase_ : Union[str, Any] ):
return (self.token_ids[index], self.lengths[index])
def __len__( self : List[Any] ):
return len(self.lengths )
def _snake_case ( self : Tuple ):
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _snake_case ( self : Tuple ):
snake_case_ : str = self.params.max_model_input_size
snake_case_ : Dict = self.lengths > max_len
logger.info(f"Splitting {sum(lowercase_ )} too long sequences." )
def divide_chunks(lowercase_ : Tuple , lowercase_ : Optional[Any] ):
return [l[i : i + n] for i in range(0 , len(lowercase_ ) , lowercase_ )]
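        # e.g. divide_chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]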
snake_case_ : Tuple = []
snake_case_ : Any = []
if self.params.mlm:
snake_case_, snake_case_ : Union[str, Any] = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token''']
else:
snake_case_, snake_case_ : Dict = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token''']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
snake_case_ : Any = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
snake_case_ : Dict = np.insert(lowercase_ , 0 , lowercase_ )
if sub_s[-1] != sep_id:
snake_case_ : Tuple = np.insert(lowercase_ , len(lowercase_ ) , lowercase_ )
assert len(lowercase_ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(lowercase_ )
new_tok_ids.extend(lowercase_ )
new_lengths.extend([len(lowercase_ ) for l in sub_seqs] )
snake_case_ : List[str] = np.array(lowercase_ )
snake_case_ : Optional[Any] = np.array(lowercase_ )
def _snake_case ( self : Optional[int] ):
snake_case_ : List[Any] = len(self )
snake_case_ : List[str] = self.lengths > 11
snake_case_ : Dict = self.token_ids[indices]
snake_case_ : Dict = self.lengths[indices]
snake_case_ : str = len(self )
logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences." )
def _snake_case ( self : Tuple ):
if "unk_token" not in self.params.special_tok_ids:
return
else:
snake_case_ : str = self.params.special_tok_ids['''unk_token''']
snake_case_ : str = len(self )
snake_case_ : int = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
snake_case_ : str = (unk_occs / self.lengths) < 0.5
snake_case_ : Optional[Any] = self.token_ids[indices]
snake_case_ : Optional[int] = self.lengths[indices]
snake_case_ : Dict = len(self )
logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." )
def _snake_case ( self : Dict ):
if not self.params.is_master:
return
logger.info(f"{len(self )} sequences" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _snake_case ( self : List[str] , lowercase_ : Dict ):
snake_case_ : Optional[int] = [t[0] for t in batch]
snake_case_ : str = [t[1] for t in batch]
assert len(lowercase_ ) == len(lowercase_ )
# Max for paddings
snake_case_ : str = max(lowercase_ )
# Pad token ids
if self.params.mlm:
snake_case_ : Tuple = self.params.special_tok_ids['''pad_token''']
else:
snake_case_ : Dict = self.params.special_tok_ids['''unk_token''']
snake_case_ : Any = [list(t.astype(lowercase_ ) ) + [pad_idx] * (max_seq_len_ - len(lowercase_ )) for t in token_ids]
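        # e.g. with max_seq_len_=5 and pad_idx=0, a sequence [7, 8, 9] is padded
        # to [7, 8, 9, 0, 0] so every row of the batch has the same length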
assert len(tk_ ) == len(lowercase_ )
assert all(len(lowercase_ ) == max_seq_len_ for t in tk_ )
snake_case_ : str = torch.tensor(tk_ ) # (bs, max_seq_len_)
snake_case_ : Optional[int] = torch.tensor(lowercase_ ) # (bs)
return tk_t, lg_t
| 264 | 0 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__A : Dict = """pt"""
elif is_tf_available():
__A : Any = """tf"""
else:
__A : Union[str, Any] = """jax"""
class __snake_case ( lowerCAmelCase__ ,unittest.TestCase):
"""simple docstring"""
lowercase = ByTaTokenizer
lowercase = False
def __lowercase ( self : str ) -> Tuple:
super().setUp()
lowerCAmelCase_ : Dict = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowercase ( self : Dict ) -> Union[str, Any]:
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def __lowercase ( self : Optional[int] , **lowerCamelCase : Optional[int] ) -> List[str]:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a__ )
def __lowercase ( self : Optional[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any]=False , lowerCamelCase : Dict=20 , lowerCamelCase : Tuple=5 ) -> Dict:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
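        # (e.g. bytes([128]).decode("utf-8") raises UnicodeDecodeError, because 0x80
        # is a UTF-8 continuation byte and cannot start a character on its own.)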
lowerCAmelCase_ : Optional[Any] = []
for i in range(len(a__ ) ):
try:
lowerCAmelCase_ : str = tokenizer.decode([i] , clean_up_tokenization_spaces=a__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
        lowerCAmelCase_ : Any = list(filter(lambda t : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , a__ ) )
        lowerCAmelCase_ : List[str] = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=a__ ) , a__ ) )
if max_length is not None and len(a__ ) > max_length:
lowerCAmelCase_ : Dict = toks[:max_length]
if min_length is not None and len(a__ ) < min_length and len(a__ ) > 0:
while len(a__ ) < min_length:
lowerCAmelCase_ : str = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase_ : List[Any] = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase_ : Tuple = tokenizer.decode(a__ , clean_up_tokenization_spaces=a__ )
if " " not in output_txt and len(a__ ) > 1:
lowerCAmelCase_ : Dict = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=a__ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=a__ )
)
if with_prefix_space:
lowerCAmelCase_ : Any = """ """ + output_txt
lowerCAmelCase_ : Dict = tokenizer.encode(a__ , add_special_tokens=a__ )
return output_txt, output_ids
def __lowercase ( self : int ) -> Tuple:
lowerCAmelCase_ : Union[str, Any] = self.ta_base_tokenizer
lowerCAmelCase_ : int = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
lowerCAmelCase_ : Tuple = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def __lowercase ( self : Optional[int] ) -> int:
lowerCAmelCase_ : Optional[Any] = self.ta_base_tokenizer
lowerCAmelCase_ : List[str] = """Unicode €."""
lowerCAmelCase_ : Optional[int] = tokenizer(a__ )
lowerCAmelCase_ : int = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded["""input_ids"""] , a__ )
# decoding
lowerCAmelCase_ : int = tokenizer.decode(a__ )
self.assertEqual(a__ , """Unicode €.</s>""" )
lowerCAmelCase_ : Optional[int] = tokenizer("""e è é ê ë""" )
lowerCAmelCase_ : List[Any] = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded["""input_ids"""] , a__ )
# decoding
lowerCAmelCase_ : int = tokenizer.decode(a__ )
self.assertEqual(a__ , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
def __lowercase ( self : Optional[int] ) -> Union[str, Any]:
lowerCAmelCase_ : List[str] = self.ta_base_tokenizer
lowerCAmelCase_ : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
lowerCAmelCase_ : Dict = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
lowerCAmelCase_ : Union[str, Any] = tokenizer(a__ , padding=a__ , return_tensors=a__ )
self.assertIsInstance(a__ , a__ )
if FRAMEWORK != "jax":
lowerCAmelCase_ : Union[str, Any] = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase_ : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(a__ , a__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __lowercase ( self : List[Any] ) -> Optional[Any]:
lowerCAmelCase_ : List[str] = self.ta_base_tokenizer
lowerCAmelCase_ : str = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCAmelCase_ : str = tokenizer(a__ , padding=a__ , return_tensors=a__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , a__ )
self.assertIn("""attention_mask""" , a__ )
self.assertNotIn("""decoder_input_ids""" , a__ )
self.assertNotIn("""decoder_attention_mask""" , a__ )
def __lowercase ( self : List[str] ) -> int:
lowerCAmelCase_ : Optional[Any] = self.ta_base_tokenizer
lowerCAmelCase_ : int = [
"""Summary of the text.""",
"""Another summary.""",
]
lowerCAmelCase_ : str = tokenizer(
text_target=a__ , max_length=32 , padding="""max_length""" , truncation=a__ , return_tensors=a__ )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def __lowercase ( self : Optional[Any] ) -> Optional[int]:
lowerCAmelCase_ : Any = self.ta_base_tokenizer
lowerCAmelCase_ : Union[str, Any] = ["""A long paragraph for summarization. </s>"""]
lowerCAmelCase_ : str = ["""Summary of the text. </s>"""]
# fmt: off
lowerCAmelCase_ : str = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
lowerCAmelCase_ : int = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
lowerCAmelCase_ : Union[str, Any] = tokenizer(a__ , text_target=a__ )
self.assertEqual(a__ , batch["""input_ids"""][0] )
self.assertEqual(a__ , batch["""labels"""][0] )
def __lowercase ( self : str ) -> Dict:
# safety check on max_len default value so we are sure the test works
lowerCAmelCase_ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCAmelCase_ : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ : Any = tempfile.mkdtemp()
lowerCAmelCase_ : Union[str, Any] = """ He is very happy, UNwant\u00E9d,running"""
lowerCAmelCase_ : Optional[int] = tokenizer.encode(a__ , add_special_tokens=a__ )
tokenizer.save_pretrained(a__ )
lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(a__ )
lowerCAmelCase_ : Union[str, Any] = after_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
shutil.rmtree(a__ )
lowerCAmelCase_ : List[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase_ : Any = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
lowerCAmelCase_ : List[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
lowerCAmelCase_ : List[Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
tokenizer.save_pretrained(a__ )
lowerCAmelCase_ : int = tokenizer.__class__.from_pretrained(a__ )
lowerCAmelCase_ : List[str] = after_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCAmelCase_ : Optional[int] = tokenizer.__class__.from_pretrained(a__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(a__ )
def __lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase_ : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a__ )
with open(os.path.join(a__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCAmelCase_ : List[Any] = json.load(a__ )
with open(os.path.join(a__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCAmelCase_ : Union[str, Any] = json.load(a__ )
lowerCAmelCase_ : Dict = [F'<extra_id_{i}>' for i in range(1_25 )]
lowerCAmelCase_ : Optional[int] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
lowerCAmelCase_ : int = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(a__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(a__ , a__ )
with open(os.path.join(a__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(a__ , a__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
a__ , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase_ : Any = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=a__ )]
                tokenizer = tokenizer_class.from_pretrained(
a__ , additional_special_tokens=a__ , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def __lowercase ( self : str ) -> Union[str, Any]:
        tokenizer_list = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a__ )
                tokenizer = tokenizer_class.from_pretrained(a__ )
self.assertTrue(tokenizer.decode([2_55] ) == """""" )
def __lowercase ( self : List[Any] ) -> Optional[Any]:
pass
def __lowercase ( self : List[str] ) -> Union[str, Any]:
pass
def __lowercase ( self : Optional[int] ) -> Dict:
pass
def __lowercase ( self : Union[str, Any] ) -> str:
pass
def __lowercase ( self : Optional[Any] ) -> Optional[int]:
        # The default common tokenizer tests use invalid tokens for ByT5, which can only accept
        # one-character strings and special added tokens as tokens
lowerCAmelCase_ : Optional[Any] = self.get_tokenizers(fast=a__ , do_lower_case=a__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
lowerCAmelCase_ : Optional[Any] = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
lowerCAmelCase_ : Tuple = tokenizer.convert_tokens_to_string(a__ )
self.assertIsInstance(a__ , a__ )
def __lowercase ( self : Any ) -> Optional[Any]:
lowerCAmelCase_ : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
                attributes_list = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
a__ , skip_special_tokens=a__ )
for attr in attributes_list:
setattr(a__ , attr + """_id""" , a__ )
self.assertEqual(getattr(a__ , a__ ) , a__ )
self.assertEqual(getattr(a__ , attr + """_id""" ) , a__ )
setattr(a__ , attr + """_id""" , a__ )
self.assertEqual(getattr(a__ , a__ ) , a__ )
self.assertEqual(getattr(a__ , attr + """_id""" ) , a__ )
setattr(a__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(a__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(a__ , """additional_special_tokens_ids""" ) , [] )
setattr(a__ , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(a__ , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(a__ , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
| 364 |
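# A minimal sketch of the save/reload round-trip the test above exercises, assuming
# any standard Hugging Face tokenizer checkpoint; "google/byt5-small" is an
# illustrative choice, not taken from the row above.
import tempfile

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("google/byt5-small")
sample = " He is very happy, UNwant\u00E9d,running"
before = tok.encode(sample, add_special_tokens=False)
with tempfile.TemporaryDirectory() as tmp_dir:
    tok.save_pretrained(tmp_dir)  # writes tokenizer_config.json, special_tokens_map.json, ...
    reloaded = AutoTokenizer.from_pretrained(tmp_dir)
after = reloaded.encode(sample, add_special_tokens=False)
assert before == after  # the round-trip must not change the encoding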
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __snake_case ( unittest.TestCase):
"""simple docstring"""
@property
    def dummy_uncond_unet( self ) -> List[Any]:
torch.manual_seed(0 )
        lowerCAmelCase_ : Optional[Any] = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def __lowercase ( self : Tuple ) -> Optional[Any]:
lowerCAmelCase_ : Optional[int] = self.dummy_uncond_unet
lowerCAmelCase_ : Tuple = PNDMScheduler()
lowerCAmelCase_ : List[Any] = PNDMPipeline(unet=lowerCamelCase , scheduler=lowerCamelCase )
pndm.to(lowerCamelCase )
pndm.set_progress_bar_config(disable=lowerCamelCase )
lowerCAmelCase_ : Dict = torch.manual_seed(0 )
lowerCAmelCase_ : List[Any] = pndm(generator=lowerCamelCase , num_inference_steps=20 , output_type="""numpy""" ).images
lowerCAmelCase_ : str = torch.manual_seed(0 )
lowerCAmelCase_ : int = pndm(generator=lowerCamelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=lowerCamelCase )[0]
lowerCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ : int = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : str ) -> Tuple:
lowerCAmelCase_ : str = """google/ddpm-cifar10-32"""
        lowerCAmelCase_ : Dict = UNet2DModel.from_pretrained(lowerCamelCase )
lowerCAmelCase_ : Dict = PNDMScheduler()
lowerCAmelCase_ : Union[str, Any] = PNDMPipeline(unet=lowerCamelCase , scheduler=lowerCamelCase )
pndm.to(lowerCamelCase )
pndm.set_progress_bar_config(disable=lowerCamelCase )
lowerCAmelCase_ : Any = torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = pndm(generator=lowerCamelCase , output_type="""numpy""" ).images
lowerCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ : List[Any] = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 89 | 0 |
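# Hedged usage sketch of the pipeline tested above: pair a pretrained UNet with a
# fresh PNDMScheduler and sample; shapes follow the "google/ddpm-cifar10-32"
# checkpoint referenced in the slow test.
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pndm = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
images = pndm(generator=torch.manual_seed(0), num_inference_steps=20, output_type="numpy").images
print(images.shape)  # (1, 32, 32, 3) for this checkpoint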
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 2_5_6_0_4_7
RO_CODE = 2_5_6_1_4_5
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase__ : Dict = NllbTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = NllbTokenizer(__a , keep_accents=__a )
UpperCAmelCase__ : Tuple = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase__ : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase__ : List[str] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase__ : Dict = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
UpperCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(__a , **__a )
UpperCAmelCase__ : int = tempfile.mkdtemp()
UpperCAmelCase__ : Optional[int] = tokenizer_r.save_pretrained(__a )
UpperCAmelCase__ : Tuple = tokenizer_p.save_pretrained(__a )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
UpperCAmelCase__ : Any = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : Optional[int] = tokenizer_r.from_pretrained(__a )
UpperCAmelCase__ : Dict = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase__ : List[Any] = tempfile.mkdtemp()
UpperCAmelCase__ : int = tokenizer_r.save_pretrained(__a , legacy_format=__a )
UpperCAmelCase__ : Dict = tokenizer_p.save_pretrained(__a )
                # Checks it saves with the same files
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : Dict = tokenizer_r.from_pretrained(__a )
UpperCAmelCase__ : Union[str, Any] = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase__ : Tuple = tempfile.mkdtemp()
UpperCAmelCase__ : Tuple = tokenizer_r.save_pretrained(__a , legacy_format=__a )
UpperCAmelCase__ : Any = tokenizer_p.save_pretrained(__a )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : int = tokenizer_r.from_pretrained(__a )
UpperCAmelCase__ : Any = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
@require_torch
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
if not self.test_seqaseq:
return
UpperCAmelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Longer text that will definitely require truncation.
UpperCAmelCase__ : Optional[int] = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
''' will only worsen the violence and misery for millions of people.''',
]
UpperCAmelCase__ : Dict = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
''' Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi'''
''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
try:
UpperCAmelCase__ : Tuple = tokenizer.prepare_seqaseq_batch(
src_texts=__a , tgt_texts=__a , max_length=3 , max_target_length=10 , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
UpperCAmelCase__ : Dict = tokenizer.prepare_seqaseq_batch(
__a , tgt_texts=__a , max_length=3 , return_tensors='''pt''' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
UpperCAmelCase__ : int = tokenizer.prepare_seqaseq_batch(
src_texts=__a , max_length=3 , max_target_length=10 , return_tensors='''pt''' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('''decoder_input_ids''' , __a )
@unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' )
def lowercase_ ( self : Dict ):
'''simple docstring'''
pass
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase__ : Optional[int] = [AddedToken('''<special>''' , lstrip=__a )]
UpperCAmelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__a , additional_special_tokens=__a , **__a )
UpperCAmelCase__ : int = tokenizer_r.encode('''Hey this is a <special> token''' )
UpperCAmelCase__ : List[Any] = tokenizer_r.encode('''<special>''' , add_special_tokens=__a )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
UpperCAmelCase__ : Tuple = self.rust_tokenizer_class.from_pretrained(
__a , additional_special_tokens=__a , **__a , )
UpperCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(
__a , additional_special_tokens=__a , **__a )
UpperCAmelCase__ : Optional[Any] = tokenizer_p.encode('''Hey this is a <special> token''' )
UpperCAmelCase__ : int = tokenizer_cr.encode('''Hey this is a <special> token''' )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( unittest.TestCase ):
lowerCAmelCase__ = """facebook/nllb-200-distilled-600M"""
    src_text = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
    tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
    expected_src_tokens = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
    @classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        cls.tokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''eng_Latn''' , tgt_lang='''ron_Latn''' )
        cls.pad_token_id = 1
return cls
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] , 256_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] , 256_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] , 256_057 )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def lowercase_ ( self : Dict ):
'''simple docstring'''
self.assertIn(__a , self.tokenizer.all_special_ids )
# fmt: off
UpperCAmelCase__ : Optional[int] = [RO_CODE, 4_254, 98_068, 112_923, 39_072, 3_909, 713, 102_767, 26, 17_314, 35_642, 14_683, 33_118, 2_022, 66_987, 2, 256_047]
# fmt: on
UpperCAmelCase__ : Union[str, Any] = self.tokenizer.decode(__a , skip_special_tokens=__a )
UpperCAmelCase__ : Optional[int] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , __a )
UpperCAmelCase__ : List[str] = 10
UpperCAmelCase__ : List[Any] = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __a )
self.assertEqual(len(__a ) , __a )
def lowercase_ ( self : str ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [256_203, 3] )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = tempfile.mkdtemp()
UpperCAmelCase__ : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__a )
UpperCAmelCase__ : Union[str, Any] = NllbTokenizer.from_pretrained(__a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a )
@require_torch
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
UpperCAmelCase__ : List[Any] = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['''ron_Latn'''] )
self.assertIsInstance(__a , __a )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
UpperCAmelCase__ : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __a )
self.assertEqual(__a , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors='''pt''' )
UpperCAmelCase__ : Dict = self.tokenizer(
text_target=self.tgt_text , padding=__a , truncation=__a , max_length=10 , return_tensors='''pt''' )
UpperCAmelCase__ : Union[str, Any] = targets['''input_ids''']
UpperCAmelCase__ : Optional[int] = shift_tokens_right(
__a , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
nested_simplify(__a ) , {
                # eng_Latn, A, test, EOS
'''input_ids''': [[256_047, 70, 7_356, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
                # fra_Latn
'''forced_bos_token_id''': 256_057,
} , )
@require_torch
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Union[str, Any] = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids , [16_297, 134_408, 25_653, 6_370, 248, 254, 103_929, 94_995, 108, 49_486, 2, 256_047] )
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Any = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids , [256_047, 16_297, 134_408, 25_653, 6_370, 248, 254, 103_929, 94_995, 108, 49_486, 2] )
| 181 |
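# Sketch of the translation-input convention asserted above: the tokenizer is built
# with source/target language codes, and generation would be steered by forcing the
# target code as BOS (256_057 == "fra_Latn" per the assertions above).
from transformers import NllbTokenizer

tok = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
inputs = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
forced_bos_token_id = tok.lang_code_to_id["fra_Latn"]  # 256_057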
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _UpperCamelCase :
'''simple docstring'''
pass
| 57 | 0 |
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCAmelCase ( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__(self , *,
        clip_extra_context_tokens = 4 , clip_embeddings_dim = 768 , time_embed_dim , cross_attention_dim , ):
super().__init__()
_snake_case = nn.Parameter(torch.zeros(lowercase_ ) )
# parameters for additional clip time embeddings
_snake_case = nn.Linear(lowercase_ , lowercase_ )
_snake_case = nn.Linear(lowercase_ , lowercase_ )
# parameters for encoder hidden states
_snake_case = clip_extra_context_tokens
_snake_case = nn.Linear(
lowercase_ , self.clip_extra_context_tokens * cross_attention_dim )
_snake_case = nn.Linear(lowercase_ , lowercase_ )
_snake_case = nn.LayerNorm(lowercase_ )
    def forward(self , *, image_embeddings , prompt_embeds , text_encoder_hidden_states , do_classifier_free_guidance ) -> List[Any]:
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
_snake_case = image_embeddings.shape[0]
_snake_case = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
_snake_case = classifier_free_guidance_embeddings.expand(
lowercase_ , -1 )
_snake_case = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
_snake_case = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
_snake_case = self.embedding_proj(lowercase_ )
_snake_case = self.clip_image_embeddings_project_to_time_embeddings(lowercase_ )
_snake_case = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
_snake_case = self.clip_extra_context_tokens_proj(lowercase_ )
_snake_case = clip_extra_context_tokens.reshape(lowercase_ , -1 , self.clip_extra_context_tokens )
_snake_case = clip_extra_context_tokens.permute(0 , 2 , 1 )
_snake_case = self.encoder_hidden_states_proj(lowercase_ )
_snake_case = self.text_encoder_hidden_states_norm(lowercase_ )
_snake_case = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings | 369 |
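# A toy restatement of the projection scheme implemented above, with illustrative
# sizes (batch 2, embedding dim 768, 4 extra tokens, cross-attention dim 1280 are
# assumptions, not values from the class; the real module also permutes/reshapes
# slightly differently).
import torch
from torch import nn

batch, dim, n_extra, xattn_dim = 2, 768, 4, 1280
image_embeddings = torch.randn(batch, dim)
to_time = nn.Linear(dim, dim)                     # added onto the timestep embedding
to_tokens = nn.Linear(dim, n_extra * xattn_dim)   # becomes extra cross-attention tokens
time_contribution = to_time(image_embeddings)                                   # (2, 768)
extra_tokens = to_tokens(image_embeddings).reshape(batch, n_extra, xattn_dim)   # (2, 4, 1280)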
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowercase (self ) -> Dict:
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            """lllyasviel/sd-controlnet-canny""" , from_pt=True , dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params["controlnet"] = controlnet_params
_snake_case = """bird"""
_snake_case = jax.device_count()
_snake_case = pipe.prepare_text_inputs([prompts] * num_samples )
_snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
_snake_case = pipe.prepare_image_inputs([canny_image] * num_samples )
_snake_case = jax.random.PRNGKey(0 )
_snake_case = jax.random.split(UpperCAmelCase , jax.device_count() )
_snake_case = replicate(UpperCAmelCase )
_snake_case = shard(UpperCAmelCase )
_snake_case = shard(UpperCAmelCase )
_snake_case = pipe(
prompt_ids=UpperCAmelCase , image=UpperCAmelCase , params=UpperCAmelCase , prng_seed=UpperCAmelCase , num_inference_steps=50 , jit=UpperCAmelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
_snake_case = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case = images[0, 253:256, 253:256, -1]
_snake_case = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def lowercase (self ) -> Optional[int]:
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            """lllyasviel/sd-controlnet-openpose""" , from_pt=True , dtype=jnp.bfloat16 )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params["controlnet"] = controlnet_params
_snake_case = """Chef in the kitchen"""
_snake_case = jax.device_count()
_snake_case = pipe.prepare_text_inputs([prompts] * num_samples )
_snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
_snake_case = pipe.prepare_image_inputs([pose_image] * num_samples )
_snake_case = jax.random.PRNGKey(0 )
_snake_case = jax.random.split(UpperCAmelCase , jax.device_count() )
_snake_case = replicate(UpperCAmelCase )
_snake_case = shard(UpperCAmelCase )
_snake_case = shard(UpperCAmelCase )
_snake_case = pipe(
prompt_ids=UpperCAmelCase , image=UpperCAmelCase , params=UpperCAmelCase , prng_seed=UpperCAmelCase , num_inference_steps=50 , jit=UpperCAmelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
_snake_case = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case = images[0, 253:256, 253:256, -1]
_snake_case = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 | 270 | 0 |
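# The replicate/shard pattern used above, in isolation: parameters get a leading
# device axis via replicate(), while the batch is split across local devices via
# shard(). Shapes here are illustrative.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = {"w": jnp.ones((3, 3))}
batch = jnp.ones((jax.local_device_count() * 2, 4))
replicated_params = replicate(params)  # every leaf gains a leading device axis
sharded_batch = shard(batch)           # shape (num_devices, 2, 4)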
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __A( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = AutoencoderKL
    main_input_name = '''sample'''
    base_precision = 1E-2
@property
    def dummy_input( self ) -> Optional[Any]:
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
return {"sample": image}
@property
    def input_shape( self ) -> Optional[Any]:
'''simple docstring'''
return (3, 32, 32)
@property
    def output_shape( self ) -> Union[str, Any]:
'''simple docstring'''
return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common( self ) -> Dict:
        '''simple docstring'''
        init_dict = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a , __a = self.prepare_init_args_and_inputs_for_common()
__a = self.model_class(**_snake_case )
model.to(_snake_case )
assert not model.is_gradient_checkpointing and model.training
__a = model(**_snake_case ).sample
        # run the backwards pass on the model. For simplicity we use the mean
        # difference to random labels as a surrogate loss and backprop on it
model.zero_grad()
__a = torch.randn_like(_snake_case )
__a = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__a = self.model_class(**_snake_case )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(_snake_case )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__a = model_a(**_snake_case ).sample
        # run the backwards pass on the model. For simplicity we use the mean
        # difference to random labels as a surrogate loss and backprop on it
model_a.zero_grad()
__a = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
__a = dict(model.named_parameters() )
__a = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a , __a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_snake_case )
__a = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
__a = model.to(_snake_case )
model.eval()
if torch_device == "mps":
__a = torch.manual_seed(0 )
else:
__a = torch.Generator(device=_snake_case ).manual_seed(0 )
__a = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__a = image.to(_snake_case )
with torch.no_grad():
__a = model(_snake_case , sample_posterior=_snake_case , generator=_snake_case ).sample
__a = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__a = torch.tensor(
[
-4.0_078E-01,
-3.8_323E-04,
-1.2_681E-01,
-1.1_462E-01,
2.0_095E-01,
1.0_893E-01,
-8.8_247E-02,
-3.0_361E-01,
-9.8_644E-03,
] )
elif torch_device == "cpu":
__a = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
__a = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(_snake_case , _snake_case , rtol=1E-2 ) )
@slow
class __A( unittest.TestCase ):
    def get_file_format( self , seed , shape ) -> Optional[Any]:
        '''simple docstring'''
        return F"""gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy"""
    def tearDown( self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_sd_image( self , seed=0 , shape=(4, 3, 512, 512) , fpaa=False ) -> Any:
        '''simple docstring'''
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
        return image
def SCREAMING_SNAKE_CASE_ ( self , _snake_case="CompVis/stable-diffusion-v1-4" , _snake_case=False ) -> Optional[Any]:
'''simple docstring'''
__a = '''fp16''' if fpaa else None
__a = torch.floataa if fpaa else torch.floataa
__a = AutoencoderKL.from_pretrained(
_snake_case , subfolder='''vae''' , torch_dtype=_snake_case , revision=_snake_case , )
model.to(_snake_case ).eval()
return model
    def get_generator( self , seed=0 ) -> Tuple:
        '''simple docstring'''
        if torch_device == "mps":
            return torch.manual_seed(seed )
        return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> List[Any]:
'''simple docstring'''
__a = self.get_sd_vae_model()
__a = self.get_sd_image(_snake_case )
__a = self.get_generator(_snake_case )
with torch.no_grad():
__a = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case ).sample
assert sample.shape == image.shape
__a = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__a = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(_snake_case , _snake_case , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Tuple:
'''simple docstring'''
__a = self.get_sd_vae_model(fpaa=_snake_case )
__a = self.get_sd_image(_snake_case , fpaa=_snake_case )
__a = self.get_generator(_snake_case )
with torch.no_grad():
__a = model(_snake_case , generator=_snake_case , sample_posterior=_snake_case ).sample
assert sample.shape == image.shape
__a = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__a = torch.tensor(_snake_case )
assert torch_all_close(_snake_case , _snake_case , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = self.get_sd_vae_model()
__a = self.get_sd_image(_snake_case )
with torch.no_grad():
__a = model(_snake_case ).sample
assert sample.shape == image.shape
__a = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__a = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(_snake_case , _snake_case , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = self.get_sd_vae_model()
__a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) )
with torch.no_grad():
__a = model.decode(_snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__a = sample[-1, -2:, :2, -2:].flatten().cpu()
__a = torch.tensor(_snake_case )
assert torch_all_close(_snake_case , _snake_case , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = self.get_sd_vae_model(fpaa=_snake_case )
__a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case )
with torch.no_grad():
__a = model.decode(_snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__a = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__a = torch.tensor(_snake_case )
assert torch_all_close(_snake_case , _snake_case , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Union[str, Any]:
'''simple docstring'''
__a = self.get_sd_vae_model(fpaa=_snake_case )
__a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) , fpaa=_snake_case )
with torch.no_grad():
__a = model.decode(_snake_case ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__a = model.decode(_snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_snake_case , _snake_case , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]:
'''simple docstring'''
__a = self.get_sd_vae_model()
__a = self.get_sd_image(_snake_case , shape=(3, 4, 64, 64) )
with torch.no_grad():
__a = model.decode(_snake_case ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__a = model.decode(_snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_snake_case , _snake_case , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = self.get_sd_vae_model()
__a = self.get_sd_image(_snake_case )
__a = self.get_generator(_snake_case )
with torch.no_grad():
__a = model.encode(_snake_case ).latent_dist
__a = dist.sample(generator=_snake_case )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__a = sample[0, -1, -3:, -3:].flatten().cpu()
__a = torch.tensor(_snake_case )
__a = 3E-3 if torch_device != '''mps''' else 1E-2
assert torch_all_close(_snake_case , _snake_case , atol=_snake_case ) | 6 |
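# Hedged sketch of the encode/sample/decode path the tests above exercise, reusing
# the dummy checkpoint from the fast tests; the 32x32 input matches its config.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
image = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample(generator=torch.manual_seed(0))
    reconstruction = vae.decode(latents).sample
print(latents.shape, reconstruction.shape)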
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]
def custom_name_func( func , param_num , param ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('''_'''.join(str(x ) for x in param.args ) )
return F"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __A( TestCasePlus ):
    @parameterized.expand(params , name_func=custom_name_func )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Any:
'''simple docstring'''
self.run_and_check(
stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )
@require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> int:
'''simple docstring'''
self.run_and_check(
stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )
    @parameterized.expand(params , name_func=custom_name_func )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> str:
'''simple docstring'''
self.run_and_check(
stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )
@require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[Any]:
'''simple docstring'''
self.run_and_check(
stage=_snake_case , model=_snake_case , distributed=_snake_case , fpaa=_snake_case , )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Tuple:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 10 , _snake_case = True , _snake_case = True , _snake_case = True , ) -> Any:
'''simple docstring'''
__a = models[model]
__a = self.run_trainer(
stage=_snake_case , model_name=_snake_case , eval_steps=_snake_case , num_train_epochs=1 , distributed=_snake_case , fpaa=_snake_case , )
self.do_checks(_snake_case )
return output_dir
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = 10 , _snake_case = 1 , _snake_case = True , _snake_case = True , ) -> Union[str, Any]:
'''simple docstring'''
__a = self.get_auto_remove_tmp_dir('''./xxx''' , after=_snake_case )
__a = F"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(_snake_case )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
if fpaa:
args.extend(['''--fp16'''] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__a = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
__a = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
__a = self.get_launcher(_snake_case )
__a = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_snake_case , env=self.get_env() )
return output_dir
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=False ) -> List[str]:
'''simple docstring'''
__a = min(2 , get_gpu_count() ) if distributed else 1
return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split() | 6 | 1 |
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self ):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self ):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak )
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self ):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor )
        self.thread.daemon = True
        self.thread.start()

    def stop(self ):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = torch.cuda.memory_allocated(i )
    torch.cuda.reset_peak_memory_stats()
    return measures


def end_measure(start_measures ):
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = (torch.cuda.memory_allocated(i ) - start_measures[str(i )]) / 2**20
        measures[F"""{i}-peak"""] = (torch.cuda.max_memory_allocated(i ) - start_measures[str(i )]) / 2**20
    return measures


def log_measures(measures , description ):
    print(F"""{description}:""" )
    print(F"""- Time: {measures['time']:.2f}s""" )
    for i in range(torch.cuda.device_count() ):
        print(F"""- GPU {i} allocated: {measures[str(i )]:.2f}MiB""" )
        peak = measures[F"""{i}-peak"""]
        print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
    print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
    print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" )
| 353 |
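# Intended usage of the helpers above: snapshot, run a workload, diff, report.
# The matmul below is a stand-in workload, not part of the original module;
# start_measure/end_measure/log_measures come from the block above.
import torch

start = start_measure()
_ = torch.randn(256, 256) @ torch.randn(256, 256)
measures = end_measure(start)
log_measures(measures, "matmul benchmark")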
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCamelCase__ = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
lowerCamelCase__ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
lowerCamelCase__ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
lowerCamelCase__ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
    def _info(self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
    def _download_and_prepare(self , dl_manager ):
'''simple docstring'''
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
    def _compute(self , predictions , references , alpha=0.9 , beta=3 , gamma=0.5 ):
        '''simple docstring'''
        if NLTK_VERSION >= version.Version("3.6.5" ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        return {"meteor": np.mean(scores )}
| 322 | 0 |
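# Direct nltk equivalent of the prediction/reference pair from the docstring above
# (a sketch; downloads the required nltk data on first run).
import nltk
from nltk import word_tokenize
from nltk.translate import meteor_score

nltk.download("wordnet", quiet=True)
nltk.download("punkt", quiet=True)
nltk.download("omw-1.4", quiet=True)
score = meteor_score.single_meteor_score(
    word_tokenize("It is a guide to action that ensures that the military will forever heed Party commands"),
    word_tokenize("It is a guide to action which ensures that the military always obeys the commands of the party"),
    alpha=0.9,
    beta=3,
    gamma=0.5,
)
print(round(score, 4))  # ~0.6944, matching the docstring example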
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class __lowerCAmelCase ( nn.Module ):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
for i in range(self.num_layers ):
_UpperCAmelCase = self.in_channels if i == 0 else self.out_channels
            _UpperCAmelCase = FlaxResnetBlock2D(
in_channels=snake_case__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(snake_case__ )
            _UpperCAmelCase = FlaxTransformer2DModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(snake_case__ )
_UpperCAmelCase = resnets
_UpperCAmelCase = attentions
if self.add_downsample:
            _UpperCAmelCase = FlaxDownsample2D(self.out_channels , dtype=self.dtype )
def __call__( self : Union[str, Any] , snake_case__ : str , snake_case__ : Any , snake_case__ : int , snake_case__ : Any=True ):
"""simple docstring"""
_UpperCAmelCase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
_UpperCAmelCase = resnet(snake_case__ , snake_case__ , deterministic=snake_case__ )
_UpperCAmelCase = attn(snake_case__ , snake_case__ , deterministic=snake_case__ )
output_states += (hidden_states,)
if self.add_downsample:
_UpperCAmelCase = self.downsamplers_a(snake_case__ )
output_states += (hidden_states,)
return hidden_states, output_states
class __lowerCAmelCase ( nn.Module ):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
"""simple docstring"""
_UpperCAmelCase = []
for i in range(self.num_layers ):
_UpperCAmelCase = self.in_channels if i == 0 else self.out_channels
            _UpperCAmelCase = FlaxResnetBlock2D(
in_channels=snake_case__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(snake_case__ )
_UpperCAmelCase = resnets
if self.add_downsample:
            _UpperCAmelCase = FlaxDownsample2D(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[int] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[str]=True ):
"""simple docstring"""
_UpperCAmelCase = ()
for resnet in self.resnets:
_UpperCAmelCase = resnet(snake_case__ , snake_case__ , deterministic=snake_case__ )
output_states += (hidden_states,)
if self.add_downsample:
_UpperCAmelCase = self.downsamplers_a(snake_case__ )
output_states += (hidden_states,)
return hidden_states, output_states
class __lowerCAmelCase ( nn.Module ):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
for i in range(self.num_layers ):
_UpperCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_UpperCAmelCase = self.prev_output_channel if i == 0 else self.out_channels
            _UpperCAmelCase = FlaxResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(snake_case__ )
            _UpperCAmelCase = FlaxTransformer2DModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(snake_case__ )
_UpperCAmelCase = resnets
_UpperCAmelCase = attentions
if self.add_upsample:
            _UpperCAmelCase = FlaxUpsample2D(self.out_channels , dtype=self.dtype )
def __call__( self : List[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : List[str]=True ):
"""simple docstring"""
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
_UpperCAmelCase = res_hidden_states_tuple[-1]
_UpperCAmelCase = res_hidden_states_tuple[:-1]
_UpperCAmelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_UpperCAmelCase = resnet(snake_case__ , snake_case__ , deterministic=snake_case__ )
_UpperCAmelCase = attn(snake_case__ , snake_case__ , deterministic=snake_case__ )
if self.add_upsample:
_UpperCAmelCase = self.upsamplers_a(snake_case__ )
return hidden_states
class FlaxUpBlockaD(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)

        return hidden_states
class FlaxUNetMidBlockaDCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]
        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
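# Shape-bookkeeping note (added for illustration, not part of the original module):
# each down block returns `(hidden_states, output_states)`, stashing one residual per
# resnet (plus one per downsample step); the matching up blocks pop those residuals
# off the end of `res_hidden_states_tuple` and concatenate them on the channel axis,
# which is why their resnets are built with `resnet_in_channels + res_skip_channels`.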
| 133 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix `a` and vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
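# Extra illustrative check (added, not in the original module): for a Hermitian
# matrix the Rayleigh quotient always lies between the extreme eigenvalues, which
# `np.linalg.eigvalsh` verifies directly.
if __name__ == "__main__":
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    r = rayleigh_quotient(a, v).real.item()
    low, high = np.linalg.eigvalsh(a)[[0, -1]]
    assert low - 1e-9 <= r <= high + 1e-9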
| 133 | 1 |
"""simple docstring"""
import re
def dna(dna: str) -> str:
    """
    Returns the complementary strand of a DNA sequence.

    >>> dna("GCTA")
    'CGAT'
    >>> dna("ATGC")
    'TACG'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
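    # Illustrative round-trip check (added; assumes the fixed `dna` above):
    # complementing a strand twice returns the original sequence.
    example = "ATCGATCG"
    assert dna(dna(example)) == example
    print(dna(example))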
| 362 |
"""simple docstring"""
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance,
):
    """Relaxes the edges leaving `v` for one direction of the bidirectional search."""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict):
    """Bi-directional Dijkstra: returns the shortest distance, or -1 if unreachable."""
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
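    # Illustrative run (added): with the sample graphs above, the shortest E -> F
    # route is E -> G -> F with total cost 3.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))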
| 85 | 0 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg, hint=None):
    """Checks the installed version of `pkg` against the pinned requirement in `deps`."""
    require_version(deps[pkg], hint)
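# Illustrative usage (added; a sketch, deliberately not executed at import time):
# dep_version_check("tokenizers") re-validates a single optional dependency on
# demand, e.g. right before a code path that actually needs it.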
| 117 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
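# Behavior note (added for illustration; a sketch, not from the original file):
# `shift_tokens_right` used by the tester below prepends the decoder start token
# and drops the last position, e.g.
#   shift_tokens_right(np.array([[5, 6, 2]]), pad_token_id=1, decoder_start_token_id=0)
#   -> array([[0, 5, 6]])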
class FlaxBlenderbotModelTester:
def __init__(self :int , _UpperCamelCase :Optional[int] , _UpperCamelCase :Dict=13 , _UpperCamelCase :Optional[Any]=7 , _UpperCamelCase :str=True , _UpperCamelCase :Tuple=False , _UpperCamelCase :int=99 , _UpperCamelCase :int=16 , _UpperCamelCase :int=2 , _UpperCamelCase :int=4 , _UpperCamelCase :str=4 , _UpperCamelCase :Dict="gelu" , _UpperCamelCase :int=0.1 , _UpperCamelCase :Tuple=0.1 , _UpperCamelCase :Union[str, Any]=32 , _UpperCamelCase :Any=2 , _UpperCamelCase :Union[str, Any]=1 , _UpperCamelCase :Tuple=0 , _UpperCamelCase :List[str]=0.0_2 , )-> str:
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = eos_token_id
__A = pad_token_id
__A = bos_token_id
__A = initializer_range
def _lowerCAmelCase (self :Optional[int] )-> Union[str, Any]:
__A = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__A = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__A = shift_tokens_right(_UpperCamelCase , 1 , 2 )
__A = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCamelCase , )
__A = prepare_blenderbot_inputs_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return config, inputs_dict
def _lowerCAmelCase (self :Union[str, Any] )-> Tuple:
__A , __A = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCAmelCase (self :Dict , _UpperCamelCase :Union[str, Any] , _UpperCamelCase :Dict , _UpperCamelCase :Optional[int] )-> str:
__A = 20
__A = model_class_name(_UpperCamelCase )
__A = model.encode(inputs_dict['''input_ids'''] )
__A , __A = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__A = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase )
__A = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
__A = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__A = model.decode(
decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
__A = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__A = model.decode(
decoder_input_ids[:, -1:] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCamelCase , )
__A = model.decode(_UpperCamelCase , _UpperCamelCase )
__A = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _lowerCAmelCase (self :Optional[int] , _UpperCamelCase :int , _UpperCamelCase :List[str] , _UpperCamelCase :Any )-> Dict:
__A = 20
__A = model_class_name(_UpperCamelCase )
__A = model.encode(inputs_dict['''input_ids'''] )
__A , __A = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__A = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__A = model.init_cache(decoder_input_ids.shape[0] , _UpperCamelCase , _UpperCamelCase )
__A = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__A = model.decode(
decoder_input_ids[:, :-1] , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
__A = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__A = model.decode(
decoder_input_ids[:, -1:] , _UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCamelCase , decoder_position_ids=_UpperCamelCase , )
__A = model.decode(_UpperCamelCase , _UpperCamelCase , decoder_attention_mask=_UpperCamelCase )
__A = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class A_ ( unittest.TestCase ):
lowerCAmelCase__ = 99
def _lowerCAmelCase (self :Dict )-> int:
__A = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
__A = input_ids.shape[0]
__A = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _lowerCAmelCase (self :Any )-> List[Any]:
__A , __A , __A = self._get_config_and_data()
__A = FlaxBlenderbotForConditionalGeneration(_UpperCamelCase )
__A = lm_model(input_ids=_UpperCamelCase )
__A = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _UpperCamelCase )
def _lowerCAmelCase (self :int )-> Dict:
__A = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
__A = FlaxBlenderbotForConditionalGeneration(_UpperCamelCase )
__A = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
__A = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
__A = lm_model(input_ids=_UpperCamelCase , decoder_input_ids=_UpperCamelCase )
__A = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _UpperCamelCase )
def _lowerCAmelCase (self :Tuple )-> Tuple:
__A = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
__A = shift_tokens_right(_UpperCamelCase , 1 , 2 )
__A = np.equal(_UpperCamelCase , 1 ).astype(np.floataa ).sum()
__A = np.equal(_UpperCamelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_UpperCamelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
lowerCAmelCase__ = True
lowerCAmelCase__ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def _lowerCAmelCase (self :List[str] )-> Optional[int]:
__A = FlaxBlenderbotModelTester(self )
def _lowerCAmelCase (self :List[str] )-> List[str]:
__A , __A = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def _lowerCAmelCase (self :Dict )-> List[str]:
__A , __A = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def _lowerCAmelCase (self :Union[str, Any] )-> Union[str, Any]:
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__A = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase )
__A = model_class(_UpperCamelCase )
@jax.jit
def encode_jitted(_UpperCamelCase :int , _UpperCamelCase :int=None , **_UpperCamelCase :Dict ):
return model.encode(input_ids=_UpperCamelCase , attention_mask=_UpperCamelCase )
with self.subTest('''JIT Enabled''' ):
__A = encode_jitted(**_UpperCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__A = encode_jitted(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowerCAmelCase (self :List[str] )-> List[Any]:
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__A = model_class(_UpperCamelCase )
__A = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
__A = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_UpperCamelCase :str , _UpperCamelCase :Tuple , _UpperCamelCase :Dict ):
return model.decode(
decoder_input_ids=_UpperCamelCase , decoder_attention_mask=_UpperCamelCase , encoder_outputs=_UpperCamelCase , )
with self.subTest('''JIT Enabled''' ):
__A = decode_jitted(**_UpperCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__A = decode_jitted(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowerCAmelCase (self :int )-> Optional[int]:
for model_class_name in self.all_model_classes:
__A = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__A = np.ones((1, 1) ) * model.config.eos_token_id
__A = model(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def _lowerCAmelCase (self :Dict )-> List[str]:
__A = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
__A = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
__A = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_UpperCamelCase )
__A = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
__A = ['''Sam''']
__A = tokenizer(_UpperCamelCase , return_tensors='''jax''' )
__A = model.generate(**_UpperCamelCase , **_UpperCamelCase )
__A = '''Sam is a great name. It means "sun" in Gaelic.'''
__A = tokenizer.batch_decode(_UpperCamelCase , **_UpperCamelCase )
assert generated_txt[0].strip() == tgt_text
| 117 | 1 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    """Converts a PIL image (or a list of them) into a normalized tensor batch."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
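# Hedged usage sketch (added; the checkpoint id is only an example of an
# unconditional DDPM/DDIM model, and the variable names are illustrative):
#   from diffusers import DDIMScheduler, UNet2DModel
#   unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256")
#   scheduler = DDIMScheduler.from_pretrained("google/ddpm-celebahq-256")
#   pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
#   images, timestep = pipe(image=pil_image, strength=0.5, return_dict=False)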
| 281 |
def harmonic_series(n_term: str) -> list:
    """Returns the harmonic series as a list of strings: ['1', '1/2', ..., '1/n']."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
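    # Worked example (added): harmonic_series("4") -> ['1', '1/2', '1/3', '1/4'];
    # summing those fractions gives H_4 = 25/12 ≈ 2.0833.
    print(harmonic_series("4"))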
| 281 | 1 |
"""simple docstring"""
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
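# Hedged usage note (added): this script is meant to be launched through the
# accelerate CLI, e.g. (the script file name here is illustrative):
#   accelerate config
#   accelerate launch nlp_example.py --mixed_precision fp16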
| 167 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : int = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
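# Hedged usage sketch (added; directory name is illustrative): configs round-trip
# through JSON via the standard PretrainedConfig machinery, e.g.:
#   config = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
#   config.save_pretrained("./xprophetnet-small")  # writes config.json
#   reloaded = XLMProphetNetConfig.from_pretrained("./xprophetnet-small")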
| 83 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_audio_spectrogram_transformer"] = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
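# Illustrative usage (added): with the `_LazyModule` indirection above, importing
# the package stays cheap and heavy submodules load on first attribute access, e.g.:
#   from transformers import ASTConfig   # no torch required
#   from transformers import ASTModel    # resolves through the torch-gated branch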
| 98 |
'''simple docstring'''
import functools
def min_distance_up_bottom(worda: str, wordb: str) -> int:
    """
    Levenshtein edit distance between two words, top-down with memoization.

    >>> min_distance_up_bottom("intention", "execution")
    5
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
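    # Illustrative run (added; assumes the fixed function above): the classic
    # "kitten" -> "sitting" example needs 3 edits (k->s, e->i, insert g).
    print(min_distance_up_bottom("kitten", "sitting"))  # 3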
| 98 | 1 |
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the open knight tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
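    # Illustrative run (added): a 1x1 board is trivially toured, while sizes 2-4
    # raise ValueError because no open tour exists on those boards.
    print(open_knight_tour(1))  # [[1]]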
| 304 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase :int = MODEL_FOR_CAUSAL_LM_MAPPING
__lowerCAmelCase :Dict = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : List[str] = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
a__ : Union[str, Any] = text_generator("""This is a test""" , do_sample=__lowercase )
self.assertEqual(
__lowercase , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
a__ : Dict = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
__lowercase , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
a__ : List[str] = text_generator("""This is a test""" , do_sample=__lowercase , num_return_sequences=2 , return_tensors=__lowercase )
self.assertEqual(
__lowercase , [
{"""generated_token_ids""": ANY(__lowercase )},
{"""generated_token_ids""": ANY(__lowercase )},
] , )
a__ : List[Any] = text_generator.model.config.eos_token_id
a__ : List[str] = """<pad>"""
a__ : int = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=__lowercase , num_return_sequences=2 , batch_size=2 , return_tensors=__lowercase , )
self.assertEqual(
__lowercase , [
[
{"""generated_token_ids""": ANY(__lowercase )},
{"""generated_token_ids""": ANY(__lowercase )},
],
[
{"""generated_token_ids""": ANY(__lowercase )},
{"""generated_token_ids""": ANY(__lowercase )},
],
] , )
@require_tf
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : Optional[int] = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
a__ : Dict = text_generator("""This is a test""" , do_sample=__lowercase )
self.assertEqual(
__lowercase , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
a__ : int = text_generator(["""This is a test""", """This is a second test"""] , do_sample=__lowercase )
self.assertEqual(
__lowercase , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase ) -> str:
"""simple docstring"""
a__ : Any = TextGenerationPipeline(model=__lowercase , tokenizer=__lowercase )
return text_generator, ["This is a test", "Another test"]
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
a__ : Optional[Any] = """Hello I believe in"""
a__ : Optional[Any] = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
a__ : Tuple = text_generator(__lowercase )
self.assertEqual(
__lowercase , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
a__ : Any = text_generator(__lowercase , stop_sequence=""" fe""" )
self.assertEqual(__lowercase , [{"""generated_text""": """Hello I believe in fe"""}] )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase ) -> List[Any]:
"""simple docstring"""
a__ : Optional[int] = text_generator.model
a__ : List[str] = text_generator.tokenizer
a__ : Optional[Any] = text_generator("""This is a test""" )
self.assertEqual(__lowercase , [{"""generated_text""": ANY(__lowercase )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
a__ : Tuple = text_generator("""This is a test""" , return_full_text=__lowercase )
self.assertEqual(__lowercase , [{"""generated_text""": ANY(__lowercase )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
a__ : List[str] = pipeline(task="""text-generation""" , model=__lowercase , tokenizer=__lowercase , return_full_text=__lowercase )
a__ : Tuple = text_generator("""This is a test""" )
self.assertEqual(__lowercase , [{"""generated_text""": ANY(__lowercase )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
a__ : str = text_generator("""This is a test""" , return_full_text=__lowercase )
self.assertEqual(__lowercase , [{"""generated_text""": ANY(__lowercase )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
a__ : Union[str, Any] = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=__lowercase )
self.assertEqual(
__lowercase , [
[{"""generated_text""": ANY(__lowercase )}, {"""generated_text""": ANY(__lowercase )}],
[{"""generated_text""": ANY(__lowercase )}, {"""generated_text""": ANY(__lowercase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
a__ : List[str] = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=__lowercase )
self.assertEqual(
__lowercase , [
[{"""generated_text""": ANY(__lowercase )}, {"""generated_text""": ANY(__lowercase )}],
[{"""generated_text""": ANY(__lowercase )}, {"""generated_text""": ANY(__lowercase )}],
] , )
with self.assertRaises(__lowercase ):
a__ : Any = text_generator("""test""" , return_full_text=__lowercase , return_text=__lowercase )
with self.assertRaises(__lowercase ):
a__ : str = text_generator("""test""" , return_full_text=__lowercase , return_tensors=__lowercase )
with self.assertRaises(__lowercase ):
a__ : Any = text_generator("""test""" , return_text=__lowercase , return_tensors=__lowercase )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
a__ : List[Any] = text_generator("""""" )
self.assertEqual(__lowercase , [{"""generated_text""": ANY(__lowercase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
a__ : Tuple = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
a__ : Tuple = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 1_0_0_0_0
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 5_0_0 , max_new_tokens=2_0 )
a__ : Any = text_generator("""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=2_0 )
# Hole strategy cannot work
with self.assertRaises(__lowercase ):
text_generator(
"""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 1_0 , )
@require_torch
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
import torch
# Classic `model_kwargs`
a__ : Optional[Any] = pipeline(
model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
a__ : Optional[int] = pipe("""This is a test""" )
self.assertEqual(
__lowercase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
a__ : Optional[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
a__ : Dict = pipe("""This is a test""" )
self.assertEqual(
__lowercase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
a__ : Tuple = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
a__ : int = pipe("""This is a test""" )
self.assertEqual(
__lowercase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
import torch
a__ : Optional[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
import torch
a__ : List[Any] = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa )
pipe("""This is a test""" , do_sample=__lowercase , top_p=0.5 )
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : Union[str, Any] = """Hello world"""
a__ : str = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
if text_generator.model.framework == "tf":
a__ : Dict = logging.get_logger("""transformers.generation.tf_utils""" )
else:
a__ : Dict = logging.get_logger("""transformers.generation.utils""" )
a__ : Optional[Any] = """Both `max_new_tokens`""" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__lowercase ) as cl:
a__ : Any = text_generator(__lowercase , max_length=1_0 , max_new_tokens=1 )
self.assertIn(__lowercase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__lowercase ) as cl:
a__ : int = text_generator(__lowercase , max_new_tokens=1 )
self.assertNotIn(__lowercase , cl.out )
with CaptureLogger(__lowercase ) as cl:
a__ : List[str] = text_generator(__lowercase , max_length=1_0 )
self.assertNotIn(__lowercase , cl.out )
| 170 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            relevant_matches = [match for match in matches if match is not None and match.group(1) is not None]
        return relevant_matches[0] if relevant_matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 365 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weights mapping in transformers PP only."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    """Returns the storage size in bytes of a single element of `dtype`."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
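# Worked example (added): str(torch.float16) ends in "16", so get_dtype_size
# returns 16 // 8 == 2 bytes per element; torch.bool is treated as bit-packed
# in this accounting, hence the 1/8 special case above.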
def convert_bloom_checkpoint_to_pytorch(bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp):
# Construct model
if bloom_config_file == "":
UpperCamelCase_: List[str] = BloomConfig()
else:
UpperCamelCase_: List[str] = BloomConfig.from_json_file(UpperCAmelCase__ )
if shard_model:
UpperCamelCase_: str = os.listdir(UpperCAmelCase__ )
UpperCamelCase_: List[str] = sorted(filter(lambda UpperCAmelCase__ : s.startswith('layer' ) and "model_00" in s , UpperCAmelCase__ ) )
UpperCamelCase_: Optional[int] = {'weight_map': {}, 'metadata': {}}
UpperCamelCase_: str = 0
UpperCamelCase_: Optional[Any] = None
UpperCamelCase_: int = BloomConfig()
for j, file in enumerate(UpperCAmelCase__ ):
print('Processing file: {}'.format(UpperCAmelCase__ ) )
UpperCamelCase_: Tuple = None
for i in range(UpperCAmelCase__ ):
# load all TP files
UpperCamelCase_: List[Any] = file.replace('model_00' , F'''model_0{i}''' )
UpperCamelCase_: List[str] = torch.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , map_location='cpu' )
# Rename keys in the transformers names
UpperCamelCase_: Optional[int] = list(temp.keys() )
for key in keys:
UpperCamelCase_: List[Any] = temp.pop(UpperCAmelCase__ )
if tensors is None:
UpperCamelCase_: Dict = temp
else:
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCamelCase_: List[Any] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
UpperCamelCase_: Dict = torch.cat([tensors[key], temp[key]] , dim=UpperCAmelCase__ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCamelCase_: Optional[int] = tensors[key] / pretraining_tp
torch.save(
UpperCAmelCase__ , os.path.join(
UpperCAmelCase__ , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase__ ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
UpperCamelCase_: int = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
UpperCamelCase_: Dict = 'pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase__ ) ).zfill(5 ) )
UpperCamelCase_: Union[str, Any] = BloomConfig()
UpperCamelCase_: Any = pytorch_dump_folder_path + '/' + CONFIG_NAME
UpperCamelCase_: Optional[int] = total_size
with open(UpperCAmelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(UpperCAmelCase__ , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
UpperCamelCase_: Tuple = json.dumps(UpperCAmelCase__ , indent=2 , sort_keys=UpperCAmelCase__ ) + '\n'
f.write(UpperCAmelCase__ )
else:
UpperCamelCase_: Optional[Any] = BloomModel(UpperCAmelCase__ )
UpperCamelCase_: Tuple = os.listdir(UpperCAmelCase__ )
UpperCamelCase_: Tuple = sorted(filter(lambda UpperCAmelCase__ : s.startswith('layer' ) and "model_00" in s , UpperCAmelCase__ ) )
UpperCamelCase_: Tuple = None
for i, file in enumerate(UpperCAmelCase__ ):
UpperCamelCase_: Union[str, Any] = None
for i in range(UpperCAmelCase__ ):
# load all TP files
UpperCamelCase_: Any = file.replace('model_00' , F'''model_0{i}''' )
UpperCamelCase_: Union[str, Any] = torch.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , map_location='cpu' )
# Rename keys in the transformers names
UpperCamelCase_: Dict = list(temp.keys() )
for key in keys:
UpperCamelCase_: Any = temp.pop(UpperCAmelCase__ )
if tensors is None:
UpperCamelCase_: Any = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCamelCase_: int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
UpperCamelCase_: Optional[int] = torch.cat([tensors[key], temp[key]] , dim=UpperCAmelCase__ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCamelCase_: Tuple = tensors[key] / pretraining_tp
UpperCamelCase_: Any = model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ )
assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
UpperCamelCase_: Any = set(other_keys.missing_keys )
else:
UpperCamelCase_: int = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
UpperCamelCase_: Optional[Any] = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
UpperCamelCase_: str = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
UpperCamelCase_: Tuple = model.to(config.torch_dtype )
torch.save(model.state_dict() , UpperCAmelCase__ )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(UpperCAmelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
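# Example invocation (paths and script name are hypothetical, not from the original):
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /ckpts/bloom-megatron \
#       --pytorch_dump_folder_path /ckpts/bloom-hf \
#       --shard_model \
#       --pretraining_tp 4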
if __name__ == "__main__":
A_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
A_ : Any = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
) | 292 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_wavlm'] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
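# With this pattern, `from transformers.models.wavlm import WavLMModel` resolves the
# torch-backed symbols only on first attribute access; at import time Python sees just
# the lightweight _LazyModule shim registered above (the same pattern recurs below).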
| 321 |
import string
def decrypt(message: str ) -> None:
for key in range(len(string.ascii_uppercase ) ):
translated = """"""
for symbol in message:
if symbol in string.ascii_uppercase:
num = string.ascii_uppercase.find(symbol )
num = num - key
if num < 0:
num = num + len(string.ascii_uppercase )
translated = translated + string.ascii_uppercase[num]
else:
translated = translated + symbol
print(f"""Decryption using Key #{key}: {translated}""" )
def main() -> None:
message = input("""Encrypted message: """ )
message = message.upper()
decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 192 | 0 |
def speed_of_sound_in_a_fluid(density: float , bulk_modulus: float ) -> float:
"""simple docstring"""
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
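# Worked example (approximate handbook values): for water, density ~998 kg/m^3 and
# bulk modulus ~2.15e9 Pa give speed_of_sound_in_a_fluid(998, 2.15e9) ~ 1468 m/s,
# close to the commonly quoted ~1480 m/s at room temperature.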
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
_optional_components : List[str] = ["""vqvae"""]
def __init__( self , vqvae , unet , mel , scheduler , ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=unet , scheduler=scheduler , mel=mel , vqvae=vqvae )
def get_default_steps( self ) -> int:
'''simple docstring'''
return 5_0 if isinstance(self.scheduler , DDIMScheduler ) else 1_0_0_0
@torch.no_grad()
def __call__( self , batch_size = 1 , audio_file = None , raw_audio = None , slice = 0 , start_step = 0 , steps = None , generator = None , mask_start_secs = 0 , mask_end_secs = 0 , step_generator = None , eta = 0 , noise = None , encoding = None , return_dict=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
steps = steps or self.get_default_steps()
self.scheduler.set_timesteps(steps )
step_generator = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
noise = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=generator , device=self.device , )
images = noise
mask = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(audio_file , raw_audio )
input_image = self.mel.audio_slice_to_image(slice )
input_image = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
input_image = (input_image / 2_5_5) * 2 - 1
input_images = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
input_images = self.vqvae.encode(torch.unsqueeze(input_images , 0 ) ).latent_dist.sample(
generator=generator )[0]
input_images = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
images[0, 0] = self.scheduler.add_noise(input_images , noise , self.scheduler.timesteps[start_step - 1] )
pixels_per_second = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
mask_start = int(mask_start_secs * pixels_per_second )
mask_end = int(mask_end_secs * pixels_per_second )
mask = self.scheduler.add_noise(input_images , noise , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , UNet2DConditionModel ):
model_output = self.unet(images , t , encoding )["sample"]
else:
model_output = self.unet(images , t )["sample"]
if isinstance(self.scheduler , DDIMScheduler ):
images = self.scheduler.step(
model_output=model_output , timestep=t , sample=images , eta=eta , generator=step_generator , )["prev_sample"]
else:
images = self.scheduler.step(
model_output=model_output , timestep=t , sample=images , generator=step_generator , )["prev_sample"]
if mask is not None:
if mask_start > 0:
images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
if mask_end > 0:
images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
images = 1 / self.vqvae.config.scaling_factor * images
images = self.vqvae.decode(images )["sample"]
images = (images / 2 + 0.5).clamp(0 , 1 )
images = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
images = (images * 2_5_5).round().astype("uint8" )
images = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_ , mode="RGB" ).convert("L" ) for _ in images) )
audios = [self.mel.image_to_audio(_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(audios )[:, np.newaxis, :] ) , **ImagePipelineOutput(images ) )
@torch.no_grad()
def encode( self , images , steps = 5_0 ) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , DDIMScheduler )
self.scheduler.set_timesteps(steps )
sample = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
sample = (sample / 2_5_5) * 2 - 1
sample = torch.Tensor(sample ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
alpha_prod_t = self.scheduler.alphas_cumprod[t]
alpha_prod_t_prev = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
beta_prod_t = 1 - alpha_prod_t
model_output = self.unet(sample , t )["sample"]
pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def slerp( x0 , x1 , alpha ) -> torch.Tensor:
'''simple docstring'''
theta = acos(torch.dot(torch.flatten(x0 ) , torch.flatten(x1 ) ) / torch.norm(x0 ) / torch.norm(x1 ) )
return sin((1 - alpha) * theta ) * x0 / sin(theta ) + sin(alpha * theta ) * x1 / sin(theta )
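# Design note (a sketch): slerp interpolates two noise tensors along a great circle,
# keeping the result on roughly the same norm shell as the endpoints, so e.g.
# slerp(noise_a, noise_b, 0.5) blends two seeds more faithfully than a linear lerp.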
| 148 | 0 |
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
name_a = a.name
name_b = b.name
a.name = ''
b.name = ''
res = a == b
a.name = name_a
b.name = name_b
return res
def _node_replace_input_with(node_proto, name, new_name):
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(i , new_name )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
_graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with(graph_proto, name, new_name):
for n in graph_proto.node:
_node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
inits_with_data = list(model.graph.initializer )
inits = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
name_i = inits[i].name
name_ref = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers(onnx_file_path):
model_file_folder = os.path.dirname(onnx_file_path )
model_file_name = os.path.basename(onnx_file_path )
model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
inits = list(model.graph.initializer )
dup_set = set()
dup_map = {}
ind_to_replace = []
total_reduced_size = 0
for i in range(len(inits ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(inits ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(i )
dup_set.add(j )
dtype = inits[j].data_type
mem_size = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , dtype )
total_reduced_size += mem_size
name_i = inits[i].name
name_j = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(name_j )
else:
dup_map[name_i] = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
ind_to_replace = sorted(ind_to_replace )
_remove_dup_initializers_from_model(model , model , ind_to_replace )
optimized_model_file_name = 'optimized_' + model_file_name
new_model = os.path.join(model_file_folder , optimized_model_file_name )
onnx.save(model , new_model )
return new_model
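# Usage sketch (path is hypothetical): remove_dup_initializers('/tmp/model.onnx')
# drops byte-identical initializer tensors, rewires every node input to the surviving
# copy, and writes '/tmp/optimized_model.onnx', returning that new path.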
| 78 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(tokenizer_name , data_dir , max_source_length=1024 , max_target_length=1024 , consider_target=False , **kwargs ) -> None:
tok = AutoTokenizer.from_pretrained(tokenizer_name )
train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path='train' , **kwargs )
pad = tok.pad_token_id
def get_lens(ds ):
dl = tqdm(
DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
max_lens = []
for batch in dl:
src_lens = batch['input_ids'].ne(pad ).sum(1 ).tolist()
tgt_lens = batch['labels'].ne(pad ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(src_lens , tgt_lens ):
max_lens.append(max(src , tgt ) )
else:
max_lens.extend(src_lens )
return max_lens
train_lens = get_lens(train_ds )
val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path='val' , **kwargs )
val_lens = get_lens(val_ds )
pickle_save(train_lens , train_ds.len_file )
pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
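# Example CLI call via fire (arguments are hypothetical):
#   python save_len_file.py --tokenizer_name facebook/bart-large --data_dir ./wmt_en_ro
# This pickles per-example max token lengths so a length-aware batch sampler can
# group similarly sized examples later.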
| 89 | 0 |
from __future__ import annotations
def min_path_sum(matrix: list[list[int]] ) -> int:
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(matrix ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(matrix ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
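# Worked example: min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) -> 7, following
# the path 1 -> 3 -> 1 -> 1 -> 1. Note the function mutates its argument in place.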
if __name__ == "__main__":
import doctest
doctest.testmod()
| 261 |
import copy
import re
class TrialShortNamer:
PREFIX = """hp"""
DEFAULTS = {}
NAMING_INFO = None
@classmethod
def set_defaults( cls , prefix , defaults ):
"""simple docstring"""
cls.PREFIX = prefix
cls.DEFAULTS = defaults
cls.build_naming_info()
@staticmethod
def shortname_for_word( info , word ):
"""simple docstring"""
if len(word ) == 0:
return ""
short_word = None
if any(char.isdigit() for char in word ):
raise Exception(f'''Parameters should not contain numbers: \'{word}\' contains a number''' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(word ) + 1 ):
prefix = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
short_word = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(integer ):
s = """"""
while integer != 0:
s = chr(ord("""A""" ) + integer % 1_0 ) + s
integer //= 1_0
return s
i = 0
while True:
sword = word + """#""" + int_to_alphabetic(i )
if sword in info["reverse_short_word"]:
continue
else:
short_word = sword
break
info["short_word"][word] = short_word
info["reverse_short_word"][short_word] = word
return short_word
@staticmethod
def shortname_for_key( info , param_name ):
"""simple docstring"""
words = param_name.split("""_""" )
shortname_parts = [TrialShortNamer.shortname_for_word(info , word ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
separators = ["""""", """_"""]
for separator in separators:
shortname = separator.join(shortname_parts )
if shortname not in info["reverse_short_param"]:
info["short_param"][param_name] = shortname
info["reverse_short_param"][shortname] = param_name
return shortname
return param_name
@staticmethod
def add_new_param_name( info , param_name ):
"""simple docstring"""
short_name = TrialShortNamer.shortname_for_key(info , param_name )
info["short_param"][param_name] = short_name
info["reverse_short_param"][short_name] = param_name
@classmethod
def build_naming_info( cls ):
"""simple docstring"""
if cls.NAMING_INFO is not None:
return
info = {
"""short_word""": {},
"""reverse_short_word""": {},
"""short_param""": {},
"""reverse_short_param""": {},
}
field_keys = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(info , k )
cls.NAMING_INFO = info
@classmethod
def shortname( cls , params ):
"""simple docstring"""
cls.build_naming_info()
assert cls.PREFIX is not None
name = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'''You should provide a default value for the param name {k} with value {v}''' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
key = cls.NAMING_INFO["""short_param"""][k]
if isinstance(v , bool ):
v = 1 if v else 0
sep = """""" if isinstance(v , (int, float) ) else """-"""
e = f'''{key}{sep}{v}'''
name.append(e )
return "_".join(name )
@classmethod
def parse_repr( cls , repr ):
"""simple docstring"""
repr = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
values = []
else:
values = repr.split("""_""" )
parameters = {}
for value in values:
if "-" in value:
p_k, p_v = value.split("""-""" )
else:
p_k = re.sub("""[0-9.]""" , """""" , value )
p_v = float(re.sub("""[^0-9.]""" , """""" , value ) )
key = cls.NAMING_INFO["""reverse_short_param"""][p_k]
parameters[key] = p_v
for k in cls.DEFAULTS:
if k not in parameters:
parameters[k] = cls.DEFAULTS[k]
return parameters
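# Usage sketch (values are illustrative):
#   TrialShortNamer.set_defaults('hp', {'learning_rate': 3e-5, 'warmup_steps': 0})
#   TrialShortNamer.shortname({'learning_rate': 1e-4, 'warmup_steps': 500})
# yields a compact run name such as 'hp_lr0.0001_ws500' (the exact short keys depend
# on which prefixes were still free), and parse_repr() inverts the encoding.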
| 261 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: int = ['''torch''']
def __init__( self : Union[str, Any] ,*A_ : Tuple ,**A_ : Optional[int] ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Optional[int] ,**A_ : Tuple ) -> Optional[int]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : List[Any] ,**A_ : Dict ) -> int:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : Optional[int] ,*A_ : Optional[Any] ,**A_ : Union[str, Any] ) -> Optional[int]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : List[Any] ,**A_ : List[str] ) -> List[str]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Union[str, Any] ,**A_ : Dict ) -> List[Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : Any ,*A_ : List[Any] ,**A_ : Dict ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Dict ,**A_ : Tuple ) -> List[str]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : str ,**A_ : List[str] ) -> List[Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''torch''']
def __init__( self : Any ,*A_ : Union[str, Any] ,**A_ : List[Any] ) -> Optional[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Union[str, Any] ,**A_ : Union[str, Any] ) -> List[Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : List[str] ,**A_ : List[Any] ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Tuple = ['''torch''']
def __init__( self : Optional[int] ,*A_ : str ,**A_ : List[str] ) -> List[str]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Optional[Any] ,**A_ : List[Any] ) -> List[Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : int ,**A_ : List[Any] ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : str ,*A_ : Optional[Any] ,**A_ : Optional[int] ) -> List[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : List[str] ,**A_ : Optional[int] ) -> Optional[Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : int ,**A_ : int ) -> str:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : List[str] ,*A_ : Union[str, Any] ,**A_ : Optional[int] ) -> int:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : List[Any] ,**A_ : str ) -> int:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Union[str, Any] ,**A_ : Union[str, Any] ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = ['''torch''']
def __init__( self : List[Any] ,*A_ : int ,**A_ : int ) -> Optional[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : int ,**A_ : Tuple ) -> List[str]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : Optional[Any] ,**A_ : Tuple ) -> Optional[Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Tuple = ['''torch''']
def __init__( self : List[str] ,*A_ : int ,**A_ : str ) -> Union[str, Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : List[Any] ,**A_ : Optional[int] ) -> Any:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : Tuple ,**A_ : Any ) -> Any:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = ['''torch''']
def __init__( self : Any ,*A_ : Optional[Any] ,**A_ : List[Any] ) -> Optional[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : str ,**A_ : List[str] ) -> Dict:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : int ,**A_ : Any ) -> int:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : Tuple ,*A_ : Tuple ,**A_ : Tuple ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Union[str, Any] ,**A_ : Tuple ) -> Tuple:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Union[str, Any] ,**A_ : Dict ) -> str:
requires_backends(cls ,['torch'] )
def _snake_case ( *snake_case__ : Any , **snake_case__ : Dict ):
requires_backends(snake_case__ , ['torch'] )
def _snake_case ( *snake_case__ : Optional[int] , **snake_case__ : List[Any] ):
requires_backends(snake_case__ , ['torch'] )
def _snake_case ( *snake_case__ : str , **snake_case__ : str ):
requires_backends(snake_case__ , ['torch'] )
def _snake_case ( *snake_case__ : int , **snake_case__ : List[Any] ):
requires_backends(snake_case__ , ['torch'] )
def _snake_case ( *snake_case__ : List[str] , **snake_case__ : Any ):
requires_backends(snake_case__ , ['torch'] )
def _snake_case ( *snake_case__ : Any , **snake_case__ : Any ):
requires_backends(snake_case__ , ['torch'] )
def _snake_case ( *snake_case__ : int , **snake_case__ : Tuple ):
requires_backends(snake_case__ , ['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''torch''']
def __init__( self : Optional[Any] ,*A_ : Optional[int] ,**A_ : Dict ) -> Dict:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : Any ,**A_ : List[Any] ) -> Any:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : Optional[int] ,**A_ : Union[str, Any] ) -> List[str]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = ['''torch''']
def __init__( self : Tuple ,*A_ : List[str] ,**A_ : List[Any] ) -> Optional[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Optional[int] ,**A_ : Union[str, Any] ) -> List[Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : Tuple ,**A_ : Union[str, Any] ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''torch''']
def __init__( self : str ,*A_ : Union[str, Any] ,**A_ : Any ) -> Tuple:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : Optional[Any] ,**A_ : List[Any] ) -> Any:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : Optional[int] ,**A_ : List[Any] ) -> Union[str, Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: int = ['''torch''']
def __init__( self : Optional[Any] ,*A_ : Any ,**A_ : Any ) -> Tuple:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : List[str] ,**A_ : Dict ) -> Union[str, Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : List[str] ,**A_ : Optional[int] ) -> Dict:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : Optional[int] ,*A_ : Any ,**A_ : List[str] ) -> Union[str, Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : Any ,**A_ : List[str] ) -> Dict:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : int ,**A_ : Dict ) -> List[str]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : List[str] ,*A_ : Dict ,**A_ : Optional[int] ) -> Dict:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : Dict ,**A_ : Any ) -> int:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Optional[Any] ,**A_ : Tuple ) -> int:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: int = ['''torch''']
def __init__( self : List[str] ,*A_ : List[str] ,**A_ : int ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Tuple ,**A_ : Optional[int] ) -> Any:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : List[Any] ,**A_ : Any ) -> Any:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''torch''']
def __init__( self : Optional[Any] ,*A_ : str ,**A_ : Optional[int] ) -> Dict:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : List[str] ,**A_ : Optional[Any] ) -> Optional[Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : List[str] ,**A_ : Tuple ) -> Union[str, Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: str = ['''torch''']
def __init__( self : str ,*A_ : Union[str, Any] ,**A_ : Dict ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : List[Any] ,**A_ : Any ) -> Optional[Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : List[str] ,**A_ : Optional[Any] ) -> int:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''torch''']
def __init__( self : int ,*A_ : int ,**A_ : Union[str, Any] ) -> str:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : int ,**A_ : List[Any] ) -> Tuple:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Union[str, Any] ,**A_ : Any ) -> Dict:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Tuple = ['''torch''']
def __init__( self : str ,*A_ : Optional[Any] ,**A_ : List[str] ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Optional[Any] ,**A_ : str ) -> Any:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : int ,**A_ : Tuple ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: str = ['''torch''']
def __init__( self : List[Any] ,*A_ : List[str] ,**A_ : List[str] ) -> int:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Dict ,**A_ : int ) -> List[str]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Tuple ,**A_ : Optional[Any] ) -> Optional[Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: int = ['''torch''']
def __init__( self : Any ,*A_ : List[str] ,**A_ : Any ) -> Tuple:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : Tuple ,**A_ : List[Any] ) -> Dict:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : List[str] ,**A_ : Optional[Any] ) -> List[Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''torch''']
def __init__( self : Tuple ,*A_ : List[str] ,**A_ : Optional[Any] ) -> Union[str, Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Any ,**A_ : Optional[int] ) -> List[Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Dict ,**A_ : Optional[int] ) -> str:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''torch''']
def __init__( self : Any ,*A_ : Optional[int] ,**A_ : List[Any] ) -> str:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : List[Any] ,**A_ : List[str] ) -> Tuple:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : Union[str, Any] ,**A_ : Any ) -> Optional[int]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''torch''']
def __init__( self : List[Any] ,*A_ : Dict ,**A_ : Tuple ) -> Optional[int]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : int ,**A_ : Tuple ) -> Tuple:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : str ,**A_ : Dict ) -> str:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Tuple = ['''torch''']
def __init__( self : Tuple ,*A_ : Union[str, Any] ,**A_ : Optional[int] ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Tuple ,**A_ : Union[str, Any] ) -> List[str]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Union[str, Any] ,**A_ : Optional[int] ) -> List[str]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = ['''torch''']
def __init__( self : Union[str, Any] ,*A_ : Dict ,**A_ : str ) -> Union[str, Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Union[str, Any] ,**A_ : List[Any] ) -> Dict:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : str ,**A_ : Optional[Any] ) -> int:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = ['''torch''']
def __init__( self : List[Any] ,*A_ : Optional[Any] ,**A_ : str ) -> str:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : List[str] ,**A_ : str ) -> int:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : List[Any] ,**A_ : Tuple ) -> Dict:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''torch''']
def __init__( self : List[str] ,*A_ : int ,**A_ : Tuple ) -> List[str]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Dict ,**A_ : List[Any] ) -> Union[str, Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : Optional[Any] ,**A_ : List[Any] ) -> Dict:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : Union[str, Any] ,*A_ : Any ,**A_ : Dict ) -> List[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : int ,**A_ : int ) -> str:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : str ,**A_ : List[str] ) -> Union[str, Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: int = ['''torch''']
def __init__( self : int ,*A_ : Any ,**A_ : Any ) -> int:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : List[Any] ,**A_ : Tuple ) -> List[str]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Tuple ,**A_ : str ) -> List[str]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = ['''torch''']
def __init__( self : Tuple ,*A_ : str ,**A_ : List[str] ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : Any ,**A_ : Any ) -> int:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : List[str] ,**A_ : Optional[Any] ) -> Any:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = ['''torch''']
def __init__( self : List[Any] ,*A_ : str ,**A_ : Union[str, Any] ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Dict ,**A_ : Dict ) -> Dict:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : List[Any] ,**A_ : str ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = ['''torch''']
def __init__( self : Union[str, Any] ,*A_ : List[Any] ,**A_ : Optional[int] ) -> Optional[int]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : Tuple ,**A_ : Union[str, Any] ) -> Optional[int]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Any ,**A_ : List[Any] ) -> Any:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = ['''torch''']
def __init__( self : str ,*A_ : Optional[Any] ,**A_ : Dict ) -> Union[str, Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : List[str] ,**A_ : List[str] ) -> int:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : List[str] ,**A_ : str ) -> List[str]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: str = ['''torch''']
def __init__( self : List[Any] ,*A_ : Dict ,**A_ : Any ) -> Dict:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : List[str] ,**A_ : List[Any] ) -> List[str]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Dict ,**A_ : Optional[int] ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: str = ['''torch''']
def __init__( self : Optional[int] ,*A_ : List[Any] ,**A_ : int ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Union[str, Any] ,**A_ : Optional[Any] ) -> int:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : Union[str, Any] ,**A_ : Union[str, Any] ) -> List[str]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: int = ['''torch''']
def __init__( self : int ,*A_ : Optional[Any] ,**A_ : int ) -> Dict:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : Any ,**A_ : str ) -> Dict:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Any ,**A_ : str ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''torch''']
def __init__( self : Optional[Any] ,*A_ : Tuple ,**A_ : Dict ) -> List[str]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : Tuple ,**A_ : List[Any] ) -> Any:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : Optional[Any] ,**A_ : List[str] ) -> List[Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : List[str] ,*A_ : Dict ,**A_ : List[Any] ) -> Optional[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Optional[Any] ,**A_ : Optional[int] ) -> Union[str, Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Union[str, Any] ,**A_ : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: int = ['''torch''']
def __init__( self : List[str] ,*A_ : Any ,**A_ : int ) -> int:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Tuple ,**A_ : Tuple ) -> Optional[int]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : List[Any] ,**A_ : str ) -> List[str]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = ['''torch''']
def __init__( self : Dict ,*A_ : Tuple ,**A_ : Tuple ) -> List[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : int ,**A_ : Optional[Any] ) -> Tuple:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : str ,**A_ : List[Any] ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = ['''torch''']
def __init__( self : Tuple ,*A_ : Any ,**A_ : Tuple ) -> List[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : List[Any] ,**A_ : Optional[Any] ) -> int:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Dict ,**A_ : Dict ) -> Optional[Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ['''torch''']
def __init__( self : Any ,*A_ : Dict ,**A_ : Dict ) -> Tuple:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Optional[Any] ,**A_ : Union[str, Any] ) -> List[str]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Dict ,**A_ : Optional[int] ) -> Union[str, Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = ['''torch''']
def __init__( self : List[Any] ,*A_ : Any ,**A_ : Union[str, Any] ) -> Tuple:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : List[Any] ,**A_ : str ) -> str:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Optional[int] ,**A_ : Optional[Any] ) -> str:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = ['''torch''']
def __init__( self : Optional[int] ,*A_ : List[str] ,**A_ : int ) -> Optional[Any]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : Dict ,**A_ : List[Any] ) -> Any:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : Union[str, Any] ,**A_ : Any ) -> Tuple:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = ['''torch''']
def __init__( self : str ,*A_ : Optional[Any] ,**A_ : Optional[Any] ) -> Optional[int]:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Optional[int] ,**A_ : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,*A_ : int ,**A_ : str ) -> Optional[Any]:
requires_backends(cls ,['torch'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''torch''']
def __init__( self : List[str] ,*A_ : Tuple ,**A_ : Tuple ) -> Any:
requires_backends(self ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Optional[int] ,**A_ : int ) -> Any:
requires_backends(cls ,['torch'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : str ,**A_ : Union[str, Any] ) -> Optional[int]:
requires_backends(cls ,['torch'] ) | 74 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_wavlm"] = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
model_type ='''perceiver'''
def __init__( self , num_latents=256 , d_latents=1280 , d_model=768 , num_blocks=1 , num_self_attends_per_block=26 , num_self_attention_heads=8 , num_cross_attention_heads=8 , qk_channels=None , v_channels=None , cross_attention_shape_for_attention="kv" , self_attention_widening_factor=1 , cross_attention_widening_factor=1 , hidden_act="gelu" , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_query_residual=True , vocab_size=262 , max_position_embeddings=2048 , image_size=56 , train_size=[368, 496] , num_frames=16 , audio_samples_per_frame=1920 , samples_per_patch=16 , output_shape=[1, 16, 224, 224] , **kwargs , ):
super().__init__(**kwargs )
self.num_latents = num_latents
self.d_latents = d_latents
self.d_model = d_model
self.num_blocks = num_blocks
self.num_self_attends_per_block = num_self_attends_per_block
self.num_self_attention_heads = num_self_attention_heads
self.num_cross_attention_heads = num_cross_attention_heads
self.qk_channels = qk_channels
self.v_channels = v_channels
self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
self.self_attention_widening_factor = self_attention_widening_factor
self.cross_attention_widening_factor = cross_attention_widening_factor
self.hidden_act = hidden_act
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_query_residual = use_query_residual
# masked language modeling attributes
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
# image classification attributes
self.image_size = image_size
# flow attributes
self.train_size = train_size
# multimodal autoencoding attributes
self.num_frames = num_frames
self.audio_samples_per_frame = audio_samples_per_frame
self.samples_per_patch = samples_per_patch
self.output_shape = output_shape
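# Example (a sketch): PerceiverConfig(num_latents=128, d_latents=512) builds a smaller
# variant; any argument left unset falls back to the deepmind/language-perceiver
# defaults captured in the signature above.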
class PerceiverOnnxConfig(OnnxConfig):
@property
def inputs( self ):
if self.task == "multiple-choice":
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def atol_for_validation( self ):
return 1e-4
def generate_dummy_inputs( self , preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size: int = -1 , seq_length: int = -1 , num_choices: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , num_channels: int = 3 , image_width: int = 40 , image_height: int = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(preprocessor , PreTrainedTokenizerBase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
batch_size = compute_effective_axis_dimension(
batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
seq_length = compute_effective_axis_dimension(
seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
# Generate dummy inputs according to compute batch and sequence
dummy_input = [' '.join(['a'] ) * seq_length] * batch_size
inputs = dict(preprocessor(dummy_input , return_tensors=framework ) )
inputs['inputs'] = inputs.pop('input_ids' )
return inputs
elif isinstance(preprocessor , FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch )
dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
inputs = dict(preprocessor(images=dummy_input , return_tensors=framework ) )
inputs['inputs'] = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
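# Usage sketch (names hypothetical): for the text model, pass a tokenizer so the dummy
# batch is built from token ids and renamed to 'inputs'; for image classification, pass
# a feature extractor whose first model input is 'pixel_values' so the pixel tensor is
# renamed instead. Any other preprocessor raises the ValueError above.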
| 371 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_nezha"] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 169 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 169 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 322 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
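# --- Added sketch (hedged): the arithmetic wired up in `__init__` per the
# reconstruction above. With the default lags [1..7] and two time features,
# `feature_size` works out to 11:
#
#     from transformers import InformerConfig  # assumed public export
#
#     config = InformerConfig(prediction_length=24, num_time_features=2)
#     # _number_of_features = 0 (embeddings) + 0 (dynamic) + 2 (time) + 0 (static) + 2 (loc/scale) = 4
#     assert config.feature_size == 1 * 7 + 4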
| 65 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
    tgt = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
    tgt = [
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False)
    assert isinstance(metrics_default_dict, defaultdict)
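# --- Added sketch (hedged): direct use of the helper under test, outside pytest.
# `calculate_rouge` comes from the local `utils` module imported above; the return
# shape (a dict keyed by rouge variant) is an assumption consistent with the asserts.
#
#     scores = calculate_rouge(PRED, TGT, rouge_keys=["rouge2", "rougeL"])
#     print(scores)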
| 65 | 1 |
def kinetic_energy(mass: float, velocity: float) -> float:
    """
    Calculate the kinetic energy of a body, 0.5 * m * v^2.

    >>> kinetic_energy(10, 10)
    500.0
    >>> kinetic_energy(0, 10)
    0.0
    """
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative')
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 99 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
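# --- Added usage note (hedged): a typical invocation; the script filename is
# illustrative. The dumped folder can then be reloaded with
# `UnCLIPImageVariationPipeline.from_pretrained(dump_path)`.
#
#     python convert_unclip_txt2img_to_image_variation.py --dump_path ./unclip-image-variation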
| 85 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 360 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
'''merges_file''': '''merges.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
),
},
'''tokenizer_config_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
),
},
'''merges_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
),
},
}
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where the word is
    represented as a tuple of (variable-length string) symbols.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
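# --- Added illustration: what `get_pairs` yields for a small BPE word tuple.
#
#     get_pairs(("h", "e", "l", "l", "o</w>"))
#     # -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}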
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)

        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)

        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding.")

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
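# --- Added usage sketch (hedged): decoding-only use, which `__init__` above allows
# when no merges file is given. Vocab content is illustrative.
#
#     import json, os, tempfile
#
#     with tempfile.TemporaryDirectory() as tmp:
#         vocab_path = os.path.join(tmp, "vocab.json")
#         with open(vocab_path, "w", encoding="utf-8") as f:
#             json.dump({"<unk>": 0, "hel@@": 1, "lo</w>": 2}, f)
#         tok = Speech2Text2Tokenizer(vocab_path, unk_token="<unk>")
#         tok.convert_tokens_to_string(["hel@@", "lo</w>"])  # -> "hello</w>"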
| 216 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 281 |
def match_pattern(input_string: str, pattern: str) -> bool:
    """
    Check whether `input_string` matches `pattern`, where "." matches any single
    character and "*" means zero or more of the preceding element.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for j in range(len_pattern)] for i in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"{input_string} matches the given pattern {pattern}")
else:
print(F"{input_string} does not match with the given pattern {pattern}")
| 281 | 1 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 159 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
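# --- Added sketch (hedged): the inactivity windows used above, factored into pure
# predicates so they can be unit-tested without the GitHub API. Names are illustrative.
def _should_close(updated_days_ago: int, created_days_ago: int) -> bool:
    # Close after 7 days of inactivity following the Stalebot notification.
    return updated_days_ago > 7 and created_days_ago >= 30


def _should_notify(updated_days_ago: int, created_days_ago: int) -> bool:
    # Post a Stalebot notification after 23 days of inactivity.
    return updated_days_ago > 23 and created_days_ago >= 30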
| 159 | 1 |
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask)

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask)

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps)
        return logits
@torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype)
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True)

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info('Generated segment', i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.')
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.')

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
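# --- Added usage sketch (hedged): end-to-end generation. The checkpoint id and the
# token input are assumptions; real `input_tokens` come from the note encoders above.
#
#     from diffusers import SpectrogramDiffusionPipeline
#
#     pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#     output = pipe(input_tokens=[[0] * 2048], num_inference_steps=10)
#     audio = output.audios[0]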
| 98 |
"""simple docstring"""
def is_palindrome(n) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
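# --- Added illustration: one step of the loop above. 47 resolves immediately
# (47 + 74 = 121, a palindrome), so 47 is not counted as a Lychrel number.
#
#     sum_reverse(47)     # -> 121
#     is_palindrome(121)  # -> True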
if __name__ == "__main__":
print(F"""{solution() = }""")
| 98 | 1 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 356 |
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
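# --- Added demo (hedged): a 4x4 grid where 0 is free and 1 is a wall. The path
# search above moves in all four directions, so this maze is solvable.
def _demo() -> bool:
    maze = [
        [0, 1, 0, 0],
        [0, 0, 0, 1],
        [1, 0, 1, 0],
        [1, 0, 0, 0],
    ]
    return solve_maze(maze)  # prints the path matrix and returns True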
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 232 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
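# --- Added sketch (hedged): default stage layout implied by `__init__` above.
#
#     from transformers import ConvNextV2Config  # assumed public export
#
#     cfg = ConvNextV2Config()
#     assert cfg.hidden_sizes == [96, 192, 384, 768]
#     assert cfg.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]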
| 182 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100)
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"], "MRP of the product"
        ] = " "
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "Current Price of the product",
        ] = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_snake_case : Optional[int] = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 292 | 0 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """
    Return the number of distinct terms generated by a**b for 2 <= a <= n and
    2 <= b <= n.

    >>> solution(5)
    15
    """
    collect_powers = set()
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
    print('''Number of terms ''', solution(int(str(input()).strip())))
| 64 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_inpainting(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 64 | 1 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23])
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31])
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
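# --- Added usage note (hedged): a typical invocation; the script filename is
# illustrative, and the checks above require a CUDA device.
#
#     python convert_sam_to_hf.py --model_name sam_vit_b_01ec64 \
#         --pytorch_dump_folder_path ./sam-vit-base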
| 55 |
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class lowerCamelCase__ ( lowerCamelCase_ ):
a__ : Optional[Any] = """mask2former"""
a__ : Union[str, Any] = ["""swin"""]
a__ : Dict = {"""hidden_size""": """hidden_dim"""}
def __init__( self , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 256 , SCREAMING_SNAKE_CASE = 256 , SCREAMING_SNAKE_CASE = 256 , SCREAMING_SNAKE_CASE = 1_024 , SCREAMING_SNAKE_CASE = "relu" , SCREAMING_SNAKE_CASE = 6 , SCREAMING_SNAKE_CASE = 10 , SCREAMING_SNAKE_CASE = 8 , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = 2_048 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = 4 , SCREAMING_SNAKE_CASE = 255 , SCREAMING_SNAKE_CASE = 100 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 2.0 , SCREAMING_SNAKE_CASE = 5.0 , SCREAMING_SNAKE_CASE = 5.0 , SCREAMING_SNAKE_CASE = 12_544 , SCREAMING_SNAKE_CASE = 3.0 , SCREAMING_SNAKE_CASE = 0.75 , SCREAMING_SNAKE_CASE = 0.02 , SCREAMING_SNAKE_CASE = 1.0 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = [4, 8, 16, 32] , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=["stage1", "stage2", "stage3", "stage4"] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
F'''Supported model types: {','.join(self.backbones_supported )}''' )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )
    @classmethod
    def from_backbone_config( cls , backbone_config , **kwargs ):
        """simple docstring"""
        return cls(
            backbone_config=backbone_config , **kwargs , )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
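# Hedged usage sketch against the public API this module mirrors (assumes a
# standard `transformers` install with Mask2Former support):
#
#   from transformers import Mask2FormerConfig
#   config = Mask2FormerConfig()                     # builds a default Swin backbone config
#   restored = Mask2FormerConfig.from_dict(config.to_dict())  # round-trips, backbone included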
| 148 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=224 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self) -> Optional[int]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _snake_case ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp(self) -> int:
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)
    @property
    def image_processor_dict(self) -> Union[str, Any]:
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self) -> str:
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor , 'image_mean'))
        self.assertTrue(hasattr(image_processor , 'image_std'))
        self.assertTrue(hasattr(image_processor , 'do_normalize'))
        self.assertTrue(hasattr(image_processor , 'do_resize'))
        self.assertTrue(hasattr(image_processor , 'size'))
    def test_batch_feature(self) -> str:
        pass
    def test_call_pil(self) -> Union[str, Any]:
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
        encoded_images = image_processor(image_inputs , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
    def test_call_numpy(self) -> Dict:
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
        encoded_images = image_processor(image_inputs , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
    def test_call_pytorch(self) -> List[Any]:
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
        encoded_images = image_processor(image_inputs , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
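# Hedged sketch of the processor under test used directly (assumes transformers
# and Pillow are installed; the image path is illustrative only):
#
#   from PIL import Image
#   from transformers import ViTImageProcessor
#   processor = ViTImageProcessor(size={"height": 18, "width": 18})
#   pixel_values = processor(images=Image.open("sample.jpg"), return_tensors="pt").pixel_values
#   print(pixel_values.shape)  # torch.Size([1, 3, 18, 18]) for an RGB input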
| 327 |
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None)
def make_linear_from_emb(emb):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path , map_location='cpu')
    args = m2m_100['args'] or m2m_100['cfg']['model']
    state_dict = m2m_100['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = M2M100Config(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict , strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
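# A minimal sketch of the embedding-tying trick behind make_linear_from_emb:
# the "linear" layer reuses the embedding matrix as its weight, so projecting a
# hidden state yields one score per vocabulary entry without new parameters.
# (Plain torch; defined but not called by the conversion above.)
def _demo_tied_projection():
    emb = nn.Embedding(10, 4)  # toy vocabulary of 10, hidden size 4
    lin = make_linear_from_emb(emb)
    logits = lin(torch.zeros(1, 4))
    assert logits.shape == (1, 10)  # one logit per vocabulary entry
    assert lin.weight.data_ptr() == emb.weight.data_ptr()  # weights are shared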
| 327 | 1 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def _lowerCamelCase( ):
__a = 1_0
__a = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
__a = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [9_7], "text": ["1976"]}] * 1_0,
"id": list(range(a ) ),
} , features=a , )
return dataset
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a ):
__a = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=a )
return filename
# FILE_CONTENT + files
FILE_CONTENT = """\
Text data.
Second line of data."""
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = tmp_path_factory.mktemp("data" ) / "file.txt"
__a = FILE_CONTENT
with open(a , "w" ) as f:
f.write(a )
return filename
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
    import bz2
__a = tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
__a = bytes(a , "utf-8" )
    with bz2.open(a , "wb" ) as f:
f.write(a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
import gzip
__a = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
__a = bytes(a , "utf-8" )
with gzip.open(a , "wb" ) as f:
f.write(a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
__a = tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
__a = bytes(a , "utf-8" )
    with lz4.frame.open(a , "wb" ) as f:
f.write(a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a ):
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
__a = tmp_path_factory.mktemp("data" ) / "file.txt.7z"
    with py7zr.SevenZipFile(a , "w" ) as archive:
archive.write(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a ):
import tarfile
__a = tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(a , "w" ) as f:
f.add(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
import lzma
__a = tmp_path_factory.mktemp("data" ) / "file.txt.xz"
__a = bytes(a , "utf-8" )
with lzma.open(a , "wb" ) as f:
f.write(a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a ):
import zipfile
__a = tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__a = tmp_path_factory.mktemp("data" ) / "file.txt.zst"
__a = bytes(a , "utf-8" )
with zstd.open(a , "wb" ) as f:
f.write(a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = tmp_path_factory.mktemp("data" ) / "file.xml"
__a = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(a , "w" ) as f:
f.write(a )
return filename
DATA = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
DATA2 = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
DATA_DICT_OF_LISTS = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
DATA_STR = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope="session" )
def _lowerCamelCase( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = datasets.Dataset.from_dict(a )
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
    with contextlib.closing(sqlite3.connect(a ) ) as con:
__a = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(a , "w" , newline="" ) as f:
__a = csv.DictWriter(a , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(a , "w" , newline="" ) as f:
__a = csv.DictWriter(a , fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a ):
    import bz2
__a = tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(a , "rb" ) as f:
__a = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(a , "wb" ) as f:
f.write(a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.basename(a ) )
f.write(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(a , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.join("main_dir" , os.path.basename(a ) ) )
f.write(a , arcname=os.path.join("main_dir" , os.path.basename(a ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
__a = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(a , "wb" ) as f:
__a = pq.ParquetWriter(a , schema=a )
__a = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(a ) )] for k in DATA[0]} , schema=a )
writer.write_table(a )
writer.close()
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
__a = {"data": DATA}
with open(a , "w" ) as f:
json.dump(a , a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
__a = {"data": DATA_DICT_OF_LISTS}
with open(a , "w" ) as f:
json.dump(a , a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(a , "w" ) as f:
for item in DATA:
f.write(json.dumps(a ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(a , "w" ) as f:
for item in DATA:
f.write(json.dumps(a ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(a , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(a ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(a , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(a ) + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a ):
import gzip
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(a , "rb" ) as orig_file:
with gzip.open(a , "wb" ) as zipped_file:
zipped_file.writelines(a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a ):
import gzip
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(a , "rb" ) as orig_file:
with gzip.open(a , "wb" ) as zipped_file:
zipped_file.writelines(a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.basename(a ) )
f.write(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.join("nested" , os.path.basename(a ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.join("main_dir" , os.path.basename(a ) ) )
f.write(a , arcname=os.path.join("main_dir" , os.path.basename(a ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(a , "w" ) as f:
f.add(a , arcname=os.path.basename(a ) )
f.add(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(a , "w" ) as f:
f.add(a , arcname=os.path.join("nested" , os.path.basename(a ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = ["0", "1", "2", "3"]
__a = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(a , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = ["0", "1", "2", "3"]
__a = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(a , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = ["0", "1", "2", "3"]
__a = tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(a , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.basename(a ) )
f.write(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.join("main_dir" , os.path.basename(a ) ) )
f.write(a , arcname=os.path.join("main_dir" , os.path.basename(a ) ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a , a ):
__a = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.basename("unsupported.ext" ) )
f.write(a , arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
__a = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(a , "w" , encoding="utf-8" ) as f:
f.write(a )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( ):
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def _lowerCamelCase( ):
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def _lowerCamelCase( a , a ):
__a = tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(a , "w" ) as f:
f.write(a , arcname=os.path.basename(a ) )
f.write(a , arcname=os.path.basename(a ).replace(".jpg" , "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def _lowerCamelCase( a ):
__a = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 1_0 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 1_0 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 1_0 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 1_0 )
return data_dir
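# Hedged sketch of how a test consumes one of the fixtures above: pytest
# resolves a fixture by matching the test's argument name to the fixture
# function's name (`csv_path` below is illustrative, since the fixture names
# in this file are obfuscated):
#
#   def test_csv_fixture(csv_path):
#       import csv
#       with open(csv_path, newline="") as f:
#           rows = list(csv.DictReader(f))
#       assert rows[0] == {"col_1": "0", "col_2": "0", "col_3": "0.0"}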
| 261 | """simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__( self , row , column , default_value = 0 ):
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]
    def __str__( self ):
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj ) ) )
        string_format_identifier = f"%{max_element_length}s"
        # Make string and return
        def single_line(row_vector ) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line
        s += "\n".join(single_line(row_vector ) for row_vector in self.array )
        return s
def __repr__( self ):
return str(self )
    def validate_indices( self , loc ):
        if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__( self , loc ):
        assert self.validate_indices(loc )
        return self.array[loc[0]][loc[1]]
    def __setitem__( self , loc , value ):
        assert self.validate_indices(loc )
        self.array[loc[0]][loc[1]] = value
    def __add__( self , another ):
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
return result
    def __neg__( self ):
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
return result
    def __sub__( self , another ):
return self + (-another)
    def __mul__( self , another ):
        if isinstance(another , (int, float) ):  # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another )})"
            raise TypeError(msg )
    def transpose( self ):
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison( self , u , v ):
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
# Calculate
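        # Sherman-Morrison: given A^(-1) (here, `self`), the inverse of the
        # rank-one update A + u v^T is
        #   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
        # and the update is only valid when 1 + v^T A^(-1) u is nonzero.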
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1():
        # a^(-1)
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}" )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}" )
        print(f"v is {v}" )
        print(f"uv^T is {u * v.transpose()}" )
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}" )
    def test2():
        import doctest
        doctest.testmod()
    test1()
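# Optional numpy cross-check of the identity exercised above (assumes numpy
# is available; the Matrix class itself does not need it):
#
#   import numpy as np
#   u = np.array([[1.0], [2.0], [-3.0]]); v = np.array([[4.0], [-2.0], [5.0]])
#   lhs = np.linalg.inv(np.eye(3) + u @ v.T)            # direct inverse of I + uv^T
#   rhs = np.eye(3) - (u @ v.T) / (1 + float(v.T @ u))  # Sherman-Morrison with A = I
#   assert np.allclose(lhs, rhs)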
| 261 | 1 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir ):
    """simple docstring"""
    results = {}
    path = os.path.join(output_dir , "all_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            results = json.load(f )
    else:
        raise ValueError(F"can't find {path}" )
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class _lowerCamelCase ( TestCasePlus ):
    def test_run_glue(self ) -> List[Any]:
import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"\n    ./examples/pytorch/text-classification/run_glue.py\n    --num_cores=8\n    ./examples/pytorch/text-classification/run_glue.py\n    --model_name_or_path distilbert-base-uncased\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n    --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n    --do_train\n    --do_eval\n    --debug tpu_metrics_debug\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --learning_rate=1e-4\n    --max_steps=10\n    --warmup_steps=2\n    --seed=42\n    --max_seq_length=128\n    ".split()
        with patch.object(sys , "argv" , testargs ):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_00 )
    def test_trainer_tpu(self ) -> str:
        import xla_spawn
        testargs = "\n    ./tests/test_trainer_tpu.py\n    --num_cores=8\n    ./tests/test_trainer_tpu.py\n    ".split()
        with patch.object(sys , "argv" , testargs ):
xla_spawn.main()
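# The argv-patching trick used by both tests above, in isolation (stdlib only;
# the leading underscore keeps pytest from collecting this helper as a test):
def _argv_patch_demo():
    testargs = ["prog", "--steps", "10"]
    with patch.object(sys, "argv", testargs):
        assert sys.argv[1] == "--steps"
    # sys.argv is restored when the context manager exits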
| 369 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _lowerCamelCase ( PipelineTool ):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self ) -> List[Any]:
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
    def encode(self , text , labels ) -> List[Any]:
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [F"This example is {label}" for label in labels] , return_tensors="pt" , padding="max_length" , )
    def decode(self , outputs ) -> int:
        logits = outputs.logits
        label_id = torch.argmax(logits[:, self.entailment_id] ).item()
        return self._labels[label_id]
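# Hedged sketch of the same NLI-based zero-shot trick through the public
# pipeline API, which wraps the entailment logic this tool reimplements:
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier("I really enjoyed this film", candidate_labels=["positive", "negative"])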
| 244 | 0 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args ):
'''simple docstring'''
    parameter_file = os.path.join(args.tf_model_dir , "parameters.json" )
    params = json.loads(open(parameter_file ).read() )
if not params:
raise ValueError(
F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(".pt" ):
a : str = args.output + ".pt"
a : Dict = OrderedDict()
with tf.device("/CPU:0" ):
        reader = tf.train.load_checkpoint(args.tf_model_dir )
        shapes = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
a : Dict = reader.get_tensor(_lowercase ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
a : Union[str, Any] = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
a : Optional[int] = 8
                a : Dict = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequential with Tanh, so 2 at a time
a : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : Dict = torch.tensor(_lowercase )
elif key_name.startswith("model/moe" ):
a : List[str] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
a : str = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
a : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : List[str] = torch.tensor(_lowercase )
elif key_name.endswith("/softmlp/kernel" ):
a : Optional[int] = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
a : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : List[Any] = torch.tensor(_lowercase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
a : Any = key_name[-9:-7]
for i in range(16 ):
a : List[Any] = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
a : str = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
a : Dict = torch.tensor(_lowercase )
elif key_name.startswith("model/mlp" ):
a : Union[str, Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
a : str = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
a : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : Optional[int] = torch.tensor(_lowercase )
elif key_name.endswith("/p1/bias" ):
a : str = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
a : List[Any] = vnp.copy() # same because it is one dimensional
a : Tuple = torch.tensor(_lowercase )
elif key_name.endswith("/p2/kernel" ):
a : Union[str, Any] = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
a : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : List[Any] = torch.tensor(_lowercase )
elif key_name.endswith("/p2/bias" ):
a : Dict = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
a : List[str] = vnp.copy() # same because it is one dimensional
a : str = torch.tensor(_lowercase )
elif key_name.startswith("model/ln" ):
a : List[str] = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
a : Optional[Any] = "model.blocks.%d.feed_forward.norm.bias" % player
a : Tuple = vnp.copy() # same because it is one dimensional
a : int = torch.tensor(_lowercase )
elif key_name.endswith("/g" ):
a : Optional[Any] = "model.blocks.%d.feed_forward.norm.weight" % player
a : List[str] = vnp.copy() # same because it is one dimensional
a : Tuple = torch.tensor(_lowercase )
elif key_name.startswith("model/att" ):
a : Optional[Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
a : Union[str, Any] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
a : List[str] = state[:, 0, :, :]
a : Dict = state[:, 1, :, :]
a : Union[str, Any] = state[:, 2, :, :]
a : str = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
a : List[str] = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
a : Dict = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
a : List[Any] = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
a : Union[str, Any] = torch.tensor(_lowercase )
a : Any = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
a : List[str] = torch.tensor(_lowercase )
a : Optional[Any] = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
a : Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("/o/kernel" ):
a : Any = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
a : Optional[int] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
a : Tuple = torch.tensor(_lowercase )
elif key_name.startswith("model/an" ):
a : List[str] = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
a : Optional[int] = "model.blocks.%d.self_attn.norm.bias" % player
a : Union[str, Any] = vnp.copy() # same because it is one dimensional
a : List[Any] = torch.tensor(_lowercase )
elif key_name.endswith("/g" ):
a : Any = "model.blocks.%d.self_attn.norm.weight" % player
a : str = vnp.copy() # same because it is one dimensional
a : Any = torch.tensor(_lowercase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
a : Optional[int] = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
a : Tuple = "model.%s.weight" % nlayer
a : Any = vnp.copy() # same in embedded
a : Tuple = torch.tensor(_lowercase )
if key_name.startswith("model/wte" ):
a : Optional[int] = "lm_head.weight"
a : Optional[int] = vnp.copy() # same in embedded
a : Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith("model/wob" ):
a : Optional[int] = "final_logits_bias"
a : Optional[Any] = vnp.copy() # same in embedded
a : Optional[int] = state.reshape((1, -1) )
a : List[Any] = torch.tensor(_lowercase )
elif key_name == "model/dense/kernel":
a : Optional[int] = "model.last_project.weight"
a : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : List[Any] = torch.tensor(_lowercase )
elif key_name == "model/dense_1/bias":
a : Dict = "model.last_project.bias"
a : Optional[Any] = vnp.copy() # same because it is one dimensional
a : Any = torch.tensor(_lowercase )
torch.save(_lowercase , args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
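# The fused-QKV split used above, shown in isolation: the TF checkpoint stores
# attention projections as one (hidden, 3, heads, head_dim) tensor; slicing
# axis 1 recovers Q/K/V, and reshape + transpose yields torch-style (out, in)
# weight matrices. (Plain numpy; defined but not called.)
def _demo_qkv_split():
    hidden, heads, head_dim = 8, 2, 4
    fused = np.zeros((hidden, 3, heads, head_dim), dtype=np.float32)
    state_q = fused[:, 0, :, :]  # query block
    q_weight = state_q.reshape([hidden, heads * head_dim]).transpose([1, 0]).copy()
    assert q_weight.shape == (heads * head_dim, hidden)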
| 105 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _UpperCamelCase ( ChunkPipeline ):
def __init__( self :Optional[int] , **lowerCamelCase :Dict ) -> int:
super().__init__(**lowerCamelCase )
requires_backends(self , "vision" )
requires_backends(self , "torch" )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING )
    def _sanitize_parameters(self , **kwargs ) -> int:
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self , image , *args , num_workers=None , batch_size=None , **kwargs ) -> str:
        return super().__call__(image , *args , num_workers=num_workers , batch_size=batch_size , **kwargs )
    def preprocess(self , image , points_per_batch=64 , crops_n_layers = 0 , crop_overlap_ratio = 512 / 1500 , points_per_crop = 32 , crop_n_points_downscale_factor = 1 , ) -> Any:
        image = load_image(image )
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes , grid_points , cropped_images , input_labels = self.image_processor.generate_crop_boxes(
            image , target_size , crops_n_layers , crop_overlap_ratio , points_per_crop , crop_n_points_downscale_factor )
        model_inputs = self.image_processor(images=cropped_images , return_tensors="pt" )
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs , device=self.device )
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None" )
        for i in range(0 , n_points , points_per_batch ):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
    def _forward(self , model_inputs , pred_iou_thresh=0.88 , stability_score_thresh=0.95 , mask_threshold=0 , stability_score_offset=1 , ) -> Dict:
        input_boxes = model_inputs.pop("input_boxes" )
        is_last = model_inputs.pop("is_last" )
        original_sizes = model_inputs.pop("original_sizes" ).tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes" ).tolist()
        model_outputs = self.model(**model_inputs )
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks , original_sizes , reshaped_input_sizes , mask_threshold , binarize=False )
        iou_scores = model_outputs["iou_scores"]
        masks , iou_scores , boxes = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , pred_iou_thresh , stability_score_thresh , mask_threshold , stability_score_offset , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
    def postprocess(self , model_outputs , output_rle_mask=False , output_bboxes_mask=False , crops_nms_thresh=0.7 , ) -> Union[str, Any]:
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores" ) )
            all_masks.extend(model_output.pop("masks" ) )
            all_boxes.append(model_output.pop("boxes" ) )
        all_scores = torch.cat(all_scores )
        all_boxes = torch.cat(all_boxes )
        output_masks , iou_scores , rle_mask , bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks , all_scores , all_boxes , crops_nms_thresh )
        extra = defaultdict(list )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v )
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
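# Hedged usage sketch of this pipeline through the public entry point (the
# model id is the released SAM base checkpoint; expect a sizeable download):
#
#   from transformers import pipeline
#   generator = pipeline("mask-generation", model="facebook/sam-vit-base", points_per_batch=64)
#   outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   print(len(outputs["masks"]))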
| 169 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class lowerCamelCase__ ( PretrainedConfig):
    '''simple docstring'''
    model_type = """biogpt"""
    def __init__(self ,vocab_size=4_23_84 ,hidden_size=10_24 ,num_hidden_layers=24 ,num_attention_heads=16 ,intermediate_size=40_96 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=10_24 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,scale_embedding=True ,use_cache=True ,layerdrop=0.0 ,activation_dropout=0.0 ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,**kwargs ,) -> Tuple:
"""simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
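# Hedged usage sketch via the public API this module mirrors (assumes a
# standard `transformers` install with BioGPT support):
#
#   from transformers import BioGptConfig, BioGptForCausalLM
#   config = BioGptConfig(num_hidden_layers=2, hidden_size=64, num_attention_heads=4, intermediate_size=128)
#   model = BioGptForCausalLM(config)  # small, randomly initialized model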
| 94 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config ,vqa_model=False ,nlvr_model=False ,irtr_model=False):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""transformer.blocks.{i}.norm1.weight""", f"""vilt.encoder.layer.{i}.layernorm_before.weight"""))
rename_keys.append((f"""transformer.blocks.{i}.norm1.bias""", f"""vilt.encoder.layer.{i}.layernorm_before.bias"""))
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.weight""", f"""vilt.encoder.layer.{i}.attention.output.dense.weight"""))
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.bias""", f"""vilt.encoder.layer.{i}.attention.output.dense.bias"""))
rename_keys.append((f"""transformer.blocks.{i}.norm2.weight""", f"""vilt.encoder.layer.{i}.layernorm_after.weight"""))
rename_keys.append((f"""transformer.blocks.{i}.norm2.bias""", f"""vilt.encoder.layer.{i}.layernorm_after.bias"""))
rename_keys.append(
(f"""transformer.blocks.{i}.mlp.fc1.weight""", f"""vilt.encoder.layer.{i}.intermediate.dense.weight"""))
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc1.bias""", f"""vilt.encoder.layer.{i}.intermediate.dense.bias"""))
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.weight""", f"""vilt.encoder.layer.{i}.output.dense.weight"""))
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.bias""", f"""vilt.encoder.layer.{i}.output.dense.bias"""))
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
])
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
])
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
])
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
])
else:
pass
return rename_keys
def read_in_q_k_v(state_dict ,config):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        prefix = '''vilt.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"""encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"""encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"""encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"""encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"""encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"""encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k ,None)
def rename_key(dct ,old ,new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url ,pytorch_dump_folder_path):
'''simple docstring'''
    config = ViltConfig(image_size=384 ,patch_size=32 ,tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = '''huggingface/label-files'''
        filename = '''vqa2-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type='''dataset''') ,'''r'''))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: '''False''', 1: '''True'''}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
else:
raise ValueError('''Unknown model type''')
# load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url ,map_location='''cpu''')['''state_dict''']
    rename_keys = create_rename_keys(config ,vqa_model ,nlvr_model ,irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict ,src ,dest)
    read_in_q_k_v(state_dict ,config)
if mlm_model or irtr_model:
        ignore_keys = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
        for k in ignore_keys:
            state_dict.pop(k ,None)
# load state dict into HuggingFace model
model.eval()
if mlm_model:
        missing_keys , unexpected_keys = model.load_state_dict(state_dict ,strict=False)
assert missing_keys == ["mlm_score.decoder.bias"]
else:
        model.load_state_dict(state_dict)
# Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''')
    processor = ViltProcessor(image_processor ,tokenizer)
# Forward pass on example inputs (image + text)
if nlvr_model:
        image1 = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' ,stream=True).raw)
        image2 = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' ,stream=True).raw)
        text = (
            '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
            ''' standing.'''
        )
        encoding_1 = processor(image1 ,text ,return_tensors='''pt''')
        encoding_2 = processor(image2 ,text ,return_tensors='''pt''')
        outputs = model(
            input_ids=encoding_1.input_ids ,pixel_values=encoding_1.pixel_values ,pixel_values_2=encoding_2.pixel_values ,)
else:
        image = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' ,stream=True).raw)
        if mlm_model:
            text = '''a bunch of [MASK] laying on a [MASK].'''
        else:
            text = '''How many cats are there?'''
        encoding = processor(image ,text ,return_tensors='''pt''')
        outputs = model(**encoding)
# Verify outputs
if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] ,expected_slice ,atol=1E-4)
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1E-4)
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1E-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model and processor to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
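# Example invocation (a sketch; the script filename and output folder are illustrative):
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm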
| 94 | 1 |
def excel_title_to_column( column_title: str ) -> int:
    '''simple docstring'''
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26, power )
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
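# Worked example (Excel columns are bijective base-26: "A" -> 1 ... "Z" -> 26, "AA" -> 27):
# excel_title_to_column("AB") == 1 * 26 + 2 == 28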
| 65 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve :
    def __init__(self : List[Any] , list_of_points : list[tuple[float, float]] ) -> None:
        """simple docstring"""
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function(self : int , t : float ) -> list[float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values
    def bezier_curve_function(self : Dict , t : float ) -> tuple[float, float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve(self : Optional[int] , step_size : float = 0.01 ) -> None:
        """simple docstring"""
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x_points = [i[0] for i in self.list_of_points]
        y_points = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="blue" , label="Curve of Degree " + str(self.degree ) , )
        plt.scatter(x_points , y_points , color="red" , label="Control Points" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
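# Quick numeric check (no plotting needed): a degree-1 curve between (1, 1) and
# (3, 3) evaluated at t=0.5 is the midpoint of the segment.
# BezierCurve([(1, 1), (3, 3)]).bezier_curve_function(0.5)  # -> (2.0, 2.0)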
| 65 | 1 |
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class BulletMenu :
    def __init__( self : Dict , prompt : str = None , choices : list = [] ):
        '''simple docstring'''
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
    def write_choice( self : int , index : int , end : str = "" ):
        '''simple docstring'''
        if sys.platform != "win32":
            writeColor(self.choices[index] , 3_2 , end )
        else:
            forceWrite(self.choices[index] , end )
    def print_choice( self : List[str] , index : int ):
        '''simple docstring'''
        if index == self.position:
            forceWrite(f' {self.arrow_char} ' )
            self.write_choice(index )
else:
forceWrite(f' {self.choices[index]}' )
reset_cursor()
    def move_direction( self : Optional[int] , direction : Direction , num_spaces : int = 1 ):
        '''simple docstring'''
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position )
        move_cursor(num_spaces , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
    def move_up( self : Dict ):
'''simple docstring'''
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
    def move_down( self : str ):
'''simple docstring'''
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
    def select( self : str ):
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(1_0 )] )
    def select_row( self : Tuple ):
        '''simple docstring'''
        index = int(chr(self.current_selection ) )
        movement = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
                self.move_direction(Direction.DOWN , movement )
else:
return
else:
return
    def run( self : List[str] , default_choice : int = 0 ):
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
        self.position = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(i )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
                        choice = int(builtins.input() )
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
                    self.write_choice(choice , "\n" )
return choice
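# Minimal usage sketch (interactive; run in a real terminal):
#   menu = BulletMenu("Which framework?", ["pytorch", "tensorflow"])
#   selected_index = menu.run(default_choice=0)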
| 298 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester( unittest.TestCase ):
    def __init__( self : Dict , parent : Optional[int] , batch_size : List[str]=7 , num_channels : int=3 , image_size : Any=1_8 , min_resolution : List[Any]=3_0 , max_resolution : int=4_0_0 , size : Dict=None , do_normalize : Optional[Any]=True , do_convert_rgb : List[str]=True , patch_size : Optional[Any]=None , ):
        '''simple docstring'''
        size = size if size is not None else {"height": 2_0, "width": 2_0}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
        self.patch_size = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}
    def prepare_image_processor_dict( self : str ):
        '''simple docstring'''
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image( self : Any ):
        '''simple docstring'''
        url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class PixaStructImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None
    def setUp( self : List[str] ):
        '''simple docstring'''
        self.image_processor_tester = PixaStructImageProcessingTester(self )
@property
    def image_processor_dict( self : Optional[int] ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self : str ):
'''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , "do_normalize" ) )
        self.assertTrue(hasattr(image_processor , "do_convert_rgb" ) )
    def test_expected_patches( self : Union[str, Any] ):
'''simple docstring'''
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict )
        max_patch = 2_0_4_8
        inputs = image_processor(dummy_image , return_tensors="pt" , max_patches=max_patch )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
    def test_call_pil( self : List[Any] ):
'''simple docstring'''
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_vqa( self : List[Any] ):
'''simple docstring'''
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError ):
                encoded_images = image_processor(
                    image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch , header_text=dummy_text ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch , header_text=dummy_text ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_numpy( self : Dict ):
'''simple docstring'''
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_pytorch( self : Optional[int] ):
'''simple docstring'''
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None
    def setUp( self : List[str] ):
        '''simple docstring'''
        self.image_processor_tester = PixaStructImageProcessingTester(self , num_channels=4 )
        self.expected_encoded_image_num_channels = 3
@property
    def image_processor_dict( self : int ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self : Optional[Any] ):
'''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , "do_normalize" ) )
        self.assertTrue(hasattr(image_processor , "do_convert_rgb" ) )
    def test_call_pil_four_channels( self : int ):
'''simple docstring'''
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
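# For reference: with the default 16x16 patches and 3 channels, each flattened
# patch row holds 16 * 16 * 3 = 768 pixel values plus 2 positional entries
# (row and column ids), i.e. the `expected_hidden_dim` of 770 computed above.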
| 298 | 1 |
def set_bit( number: int ,position: int ) -> int:
    return number | (1 << position)
def clear_bit( number: int ,position: int ) -> int:
    return number & ~(1 << position)
def flip_bit( number: int ,position: int ) -> int:
    return number ^ (1 << position)
def is_bit_set( number: int ,position: int ) -> bool:
    return ((number >> position) & 1) == 1
def get_bit( number: int ,position: int ) -> int:
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
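# Worked examples (0b1101 == 13):
# set_bit(0b1101, 1)    -> 0b1111 (15)
# clear_bit(0b1101, 2)  -> 0b1001 (9)
# flip_bit(0b1101, 1)   -> 0b1111 (15)
# is_bit_set(0b1101, 0) -> True
# get_bit(0b1101, 1)    -> 1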
| 71 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key( name: str ):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace('''img_encoder.pos_embed''' , '''vision_model.embeddings.position_embeddings''' )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace('''img_encoder.patch_embed.proj''' , '''vision_model.embeddings.patch_embeddings.projection''' )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace('''img_encoder.patch_embed.norm''' , '''vision_model.embeddings.layernorm''' )
    if "img_encoder.layers" in name:
        name = name.replace('''img_encoder.layers''' , '''vision_model.encoder.stages''' )
    if "blocks" in name and "res" not in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace('''attn''' , '''self_attn''' )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace('''proj''' , '''out_proj''' )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace('''pre_assign_attn.attn.proj''' , '''pre_assign_attn.attn.out_proj''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layer_norm1''' )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace('''norm2''' , '''layer_norm2''' )
    if "img_encoder.norm" in name:
        name = name.replace('''img_encoder.norm''' , '''vision_model.layernorm''' )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace('''text_encoder.token_embedding''' , '''text_model.embeddings.token_embedding''' )
    if "text_encoder.positional_embedding" in name:
        name = name.replace('''text_encoder.positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace('''text_encoder.transformer.resblocks.''' , '''text_model.encoder.layers.''' )
    if "ln_1" in name:
        name = name.replace('''ln_1''' , '''layer_norm1''' )
    if "ln_2" in name:
        name = name.replace('''ln_2''' , '''layer_norm2''' )
    if "c_fc" in name:
        name = name.replace('''c_fc''' , '''fc1''' )
    if "c_proj" in name:
        name = name.replace('''c_proj''' , '''fc2''' )
    if "text_encoder" in name:
        name = name.replace('''text_encoder''' , '''text_model''' )
    if "ln_final" in name:
        name = name.replace('''ln_final''' , '''final_layer_norm''' )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace('''img_projector.linear_hidden.''' , '''visual_projection.0.''' )
    if "img_projector.linear_out." in name:
        name = name.replace('''img_projector.linear_out.''' , '''visual_projection.3.''' )
    if "text_projector.linear_hidden" in name:
        name = name.replace('''text_projector.linear_hidden''' , '''text_projection.0''' )
    if "text_projector.linear_out" in name:
        name = name.replace('''text_projector.linear_out''' , '''text_projection.3''' )
    return name
def convert_state_dict( orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
            key_split = key.split('''.''' )
            stage_num , layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
if "weight" in key:
__a : int = val[:dim, :]
__a : List[str] = val[dim : dim * 2, :]
__a : List[Any] = val[-dim:, :]
else:
__a : List[str] = val[:dim]
__a : int = val[dim : dim * 2]
__a : Any = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
            key_split = key.split('''.''' )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
if "weight" in key:
__a : List[str] = val[:dim, :]
__a : Any = val[
dim : dim * 2, :
]
__a : Dict = val[-dim:, :]
else:
__a : List[str] = val[:dim]
__a : Any = val[dim : dim * 2]
__a : Any = val[-dim:]
else:
            new_name = rename_key(key )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
return orig_state_dict
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint( checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
    image = prepare_img()
    inputs = processor(text=['''a photo of a cat''', '''a photo of a dog'''] , images=image , padding=True , return_tensors='''pt''' )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.35_23, 6.36_29]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.18_73, 8.62_30]] )
    else:
        raise ValueError(f"Model name {model_name} not supported." )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1e-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print('''Successfully saved processor and model to''' , pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing to the hub...''' )
        processor.push_to_hub(model_name , organization='''nielsr''' )
        model.push_to_hub(model_name , organization='''nielsr''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
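# Example invocation (a sketch; script filename and checkpoint path are illustrative):
#   python convert_groupvit_nvlab_to_hf.py \
#       --checkpoint_path ./group_vit_gcc_yfcc_30e.pth \
#       --model_name groupvit-gcc-yfcc \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc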
| 216 | 0 |
from typing import Any
import numpy as np
def is_hermitian( matrix: np.ndarray ) -> bool:
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient( a: np.ndarray , v: np.ndarray ) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
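# For a Hermitian matrix A and a nonzero vector v, the Rayleigh quotient
# R(A, v) = (v* A v) / (v* v) is real and bounded by the extreme eigenvalues
# of A: lambda_min <= R(A, v) <= lambda_max.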
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), f"""{a} is not hermitian."""
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), f"""{a} is not hermitian."""
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 370 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline( DiffusionPipeline ):
    def __init__( self , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
def __UpperCamelCase( self , A_ = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A_ )
    def disable_attention_slicing( self ):
        '''simple docstring'''
        self.enable_attention_slicing(None )
@torch.no_grad()
    def __call__( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , text_embeddings = None , **kwargs , ):  # NOTE: `latents_reference` (used below) is assumed to arrive via **kwargs in this community pipeline
'''simple docstring'''
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                F""" {type(callback_steps )}.""" )
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
            uncond_tokens : List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    F"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="""
                    F""" {type(prompt )}.""" )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    F"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"""
                    F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    " the batch size of `prompt`." )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding="max_length" , max_length=max_length , truncation=True , return_tensors="pt" , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size , num_images_per_prompt , 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
                # NOTE: generation order of reference vs. target latents is assumed here
                latents_reference = torch.randn(
                    latents_shape_reference , generator=generator , device="cpu" , dtype=latents_dtype ).to(self.device )
                latents = torch.randn(latents_shape , generator=generator , device="cpu" , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference , generator=generator , device=self.device , dtype=latents_dtype )
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            latents_reference = latents_reference.to(self.device )
            latents = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx , 0 )
        dy = max(-dy , 0 )
        # import pdb
        # pdb.set_trace()
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase : List[str] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase : str = {}
if accepts_eta:
UpperCamelCase : int = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
# expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
# perform guidance
if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
                callback(i , t , latents )
        latents = 1 / 0.1_82_15 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image ) , return_tensors="pt" ).to(
                self.device )
            image , has_nsfw_concept = self.safety_checker(
                images=image , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
            has_nsfw_concept = None
if output_type == "pil":
            image = self.numpy_to_pil(image )
if not return_dict:
return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=has_nsfw_concept )
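# Usage sketch (assumes this file is loaded as the "seed_resize_stable_diffusion"
# community pipeline; model id and sizes are illustrative):
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", custom_pipeline="seed_resize_stable_diffusion")
#   image = pipe("a fantasy landscape", height=640, width=640).images[0]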
| 140 | 0 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler( SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    order = 1
@register_to_config
    def __init__( self : Tuple , num_train_timesteps : int = 1_0_0_0 , trained_betas : Optional[Union[np.ndarray, List[float]]] = None ) -> None:
        """simple docstring"""
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps( self : int , num_inference_steps : int , device : Union[str, torch.device] = None ) -> None:
        """simple docstring"""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []
    def step( self : Optional[int] , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , return_dict : bool = True , ) -> Union[SchedulerOutput, Tuple]:
        """simple docstring"""
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (2_3 * self.ets[-1] - 1_6 * self.ets[-2] + 5 * self.ets[-3]) / 1_2
        else:
            ets = (1 / 2_4) * (5_5 * self.ets[-1] - 5_9 * self.ets[-2] + 3_7 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def scale_model_input( self : Optional[Any] , sample : torch.FloatTensor , *args : Tuple , **kwargs : Union[str, Any] ) -> torch.FloatTensor:
        """simple docstring"""
        return sample
    def _get_prev_sample( self : Any , sample : Any , timestep_index : Tuple , prev_timestep_index : str , ets : Dict ) -> Optional[Any]:
        """simple docstring"""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha , 1e-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
def __len__( self : int ) -> List[Any]:
"""simple docstring"""
return self.config.num_train_timesteps
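# Minimal usage sketch (tensors and the `unet` model are placeholders):
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       model_output = unet(sample, t).sample
#       sample = scheduler.step(model_output, t, sample).prev_sample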
| 159 |
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('''USER_TOKEN''', '''''')
def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    '''simple docstring'''
    headers = {
        "Authorization": F'''token {auth_token}''',
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
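# Typical run (token value is illustrative):
#   USER_TOKEN=ghp_xxx python fetch_github_info.py
# prints key/value pairs such as "login: <your-username>" from the /user endpoint.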
| 159 | 1 |
def matching_min_vertex_cover(graph: dict ) -> set:
    """simple docstring"""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph )
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node , to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
def get_edges(graph: dict ) -> set:
    """simple docstring"""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 364 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
    """processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
        """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TrOCRForCausalLM""",
        """TrOCRPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 231 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
UpperCAmelCase__ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline( self : int , model : Dict , tokenizer : Union[str, Any] , processor : Optional[Any] ) ->Optional[int]:
        """simple docstring"""
        classifier = ZeroShotClassificationPipeline(
            model=model , tokenizer=tokenizer , candidate_labels=['''polics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test( self : Tuple , classifier : Any , _ : List[Any] ) ->int:
        """simple docstring"""
        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
        self.assertEqual(outputs , {'''sequence''': ANY(str ), '''labels''': [ANY(str )], '''scores''': [ANY(float )]} )
        # No kwarg
        outputs = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
        self.assertEqual(outputs , {'''sequence''': ANY(str ), '''labels''': [ANY(str )], '''scores''': [ANY(float )]} )
        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
        self.assertEqual(outputs , {'''sequence''': ANY(str ), '''labels''': [ANY(str )], '''scores''': [ANY(float )]} )
        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
        self.assertEqual(
            outputs , {'''sequence''': ANY(str ), '''labels''': [ANY(str ), ANY(str )], '''scores''': [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
        self.assertEqual(
            outputs , {'''sequence''': ANY(str ), '''labels''': [ANY(str ), ANY(str )], '''scores''': [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
        outputs = classifier(
            '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
        self.assertEqual(outputs , {'''sequence''': ANY(str ), '''labels''': [ANY(str )], '''scores''': [ANY(float )]} )
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''labels''': [ANY(str ), ANY(str )], '''scores''': [ANY(float ), ANY(float )]}
                for i in range(1 )
            ] , )
        outputs = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''labels''': [ANY(str ), ANY(str )], '''scores''': [ANY(float ), ANY(float )]}
                for i in range(2 )
            ] , )
        with self.assertRaises(ValueError ):
            classifier('''''' , candidate_labels='''politics''' )
        with self.assertRaises(TypeError ):
            classifier(None , candidate_labels='''politics''' )
        with self.assertRaises(ValueError ):
            classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
        with self.assertRaises(TypeError ):
            classifier('''Who are you voting for in 2020?''' , candidate_labels=None )
        with self.assertRaises(ValueError ):
            classifier(
                '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
        with self.assertRaises(AttributeError ):
            classifier(
                '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=None , )
        self.run_entailment_id(classifier )
    def run_entailment_id( self : int , zero_shot_classifier : Pipeline ) ->int:
        """simple docstring"""
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        config.label2id = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        config.label2id = original_label2id
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
@require_torch
    def test_truncation( self : Optional[Any] ) ->str:
        """simple docstring"""
        zero_shot_classifier = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            '''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
    def test_small_model_pt( self : int ) ->Optional[Any]:
        """simple docstring"""
        zero_shot_classifier = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
    def test_small_model_tf( self : Optional[int] ) ->Union[str, Any]:
        """simple docstring"""
        zero_shot_classifier = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
    def test_large_model_pt( self : int ) ->List[Any]:
        """simple docstring"""
        zero_shot_classifier = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
    def test_large_model_tf( self : Optional[int] ) ->Optional[Any]:
        """simple docstring"""
        zero_shot_classifier = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
| 0 |
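For reference, the pipeline API exercised by the test above can also be driven standalone. This minimal sketch reuses the same model, input, and candidate labels as the first assertion; only the surrounding script is new:

from transformers import pipeline

zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
result = zero_shot_classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
# result is a dict with "sequence", "labels" (sorted by descending score), and "scores"
print(result["labels"][0], result["scores"][0])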
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 232 | 0 |
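The metric_definitions handed to the estimator come from the sm_env fixture, which is not shown in this file. For context, SageMaker metric definitions are name/regex pairs scraped from the training log; the regexes below are illustrative assumptions, not the fixture's real values:

# hypothetical metric definitions; the real ones are supplied by the sm_env fixture
metric_definitions = [
    {"Name": "train_runtime", "Regex": r"train_runtime.*=\s*([0-9\.]+)"},
    {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\s*([0-9\.]+)"},
    {"Name": "eval_loss", "Regex": r"eval_loss.*=\s*([0-9\.]+)"},
]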
from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


class SimpleImageGenerationPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # start from pure Gaussian noise of the shape the UNet expects
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        # map from [-1, 1] to [0, 1] and convert to channel-last numpy
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 364 |
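A minimal way to exercise the pipeline class above, using diffusers' real UNet2DModel and DDPMScheduler; the tiny untrained model and its configuration values are assumptions chosen only to check that the pipeline runs end to end:

from diffusers import DDPMScheduler, UNet2DModel

# tiny untrained UNet: outputs are noise, but shapes and the denoising loop are verified
unet = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    layers_per_block=1,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "DownBlock2D"),
    up_block_types=("UpBlock2D", "UpBlock2D"),
)
scheduler = DDPMScheduler(num_train_timesteps=1000)
pipe = SimpleImageGenerationPipeline(unet=unet, scheduler=scheduler)
images = pipe(batch_size=1, num_inference_steps=10).images  # list containing one 32x32 PIL image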
from collections import deque


def tarjan(g: list[list[int]]) -> list[list[int]]:
    """Return the strongly connected components of the digraph g (adjacency lists)."""
    n = len(g)
    stack: deque[int] = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            # v is the root of a strongly connected component: pop it off the stack
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 247 | 0 |
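Tarjan's algorithm touches each vertex and edge a constant number of times, so tarjan above runs in O(V + E). A second sanity check on a two-component digraph (the graph itself is an illustrative assumption, not part of the built-in test):

# vertices 0 and 1 form a cycle and collapse into one SCC; vertex 2 is isolated
g_small = create_graph(3, [(0, 1), (1, 0)])
print(tarjan(g_small))  # [[1, 0], [2]]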