| code (string, length 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, length 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
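# Usage sketch (added illustration, assuming the installed transformers package):
# the derived hidden_size is what VisionEncoderDecoderModel reads from the encoder.
from transformers import DonutSwinConfig

donut_config = DonutSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
assert donut_config.hidden_size == 96 * 2 ** (len(donut_config.depths) - 1)  # 768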
| 93 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : str = -1
UpperCAmelCase_ : Dict = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : Any = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase_ : Optional[int] = cs.out[:-1]
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: Dict ) -> Optional[Any]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = -1
UpperCAmelCase_ : List[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : Dict = tokenizer.decode(greedy_ids[0] )
UpperCAmelCase_ : str = TextIteratorStreamer(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCAmelCase_ : str = Thread(target=model.generate ,kwargs=lowerCamelCase_ )
thread.start()
UpperCAmelCase_ : int = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[Any] ) -> Dict:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = -1
UpperCAmelCase_ : Tuple = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Dict = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : str = greedy_ids[:, input_ids.shape[1] :]
UpperCAmelCase_ : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ,skip_prompt=lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase_ : List[str] = cs.out[:-1]
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: str ) -> str:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained("""distilgpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Any = -1
UpperCAmelCase_ : Union[str, Any] = torch.ones((1, 5) ,device=lowerCamelCase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCAmelCase_ : Union[str, Any] = TextStreamer(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=1 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCAmelCase_ : List[str] = cs.out[:-1] # Remove the final "\n"
UpperCAmelCase_ : Dict = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) )
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = -1
UpperCAmelCase_ : Optional[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = TextIteratorStreamer(lowerCamelCase_ ,timeout=0.0_0_1 )
UpperCAmelCase_ : Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCAmelCase_ : Dict = Thread(target=model.generate ,kwargs=lowerCamelCase_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = """"""
for new_text in streamer:
streamer_text += new_text
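# The tests above exercise this streaming pattern; a minimal standalone sketch
# (added illustration, using the same tiny test checkpoint as the suite):
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer)
Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer}).start()
generated = "".join(chunk for chunk in streamer)  # chunks arrive as tokens are produced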
| 345 | 0 |
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 178 |
from __future__ import annotations


def minimum_cost_path(matrix: list[list[int]]) -> int:
    """Minimum cost of a path from the top-left to the bottom-right corner of
    ``matrix``, moving only right or down; updates the matrix in place."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
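# Worked example (added illustration; cheapest route is 1 -> 3 -> 1 -> 1 -> 1):
assert minimum_cost_path([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7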
| 178 | 1 |
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def _lowerCAmelCase ( lowercase_ , lowercase_ , **lowercase_ ):
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ , **lowercase_ )
UpperCAmelCase = AutoModelForSeqaSeqLM.from_config(lowercase_ )
model.save_pretrained(lowercase_ )
AutoTokenizer.from_pretrained(lowercase_ ).save_pretrained(lowercase_ )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
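# Example invocation via python-fire (file and model names are illustrative):
#   python save_randomly_initialized_version.py t5-small /tmp/t5-random --d_model=64
# Extra flags are forwarded as **config_kwargs to AutoConfig.from_pretrained.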
| 78 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case_ = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
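# Sanity checks for the ceil-style rounding above (added illustration):
assert downscale_height_and_width(768, 768) == (96, 96)  # 768 / 8**2 = 12 exactly
assert downscale_height_and_width(500, 500) == (64, 64)  # 500 / 64 rounds up to 8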
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self :Dict , lowercase_ :UNetaDConditionModel , lowercase_ :DDPMScheduler , lowercase_ :VQModel , ) -> List[str]:
super().__init__()
self.register_modules(
unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[Any] , lowercase_ :Tuple , lowercase_ :Any ) -> Optional[int]:
# get the original timestep using init_timestep
UpperCAmelCase = min(int(num_inference_steps * strength ) , lowercase_ )
UpperCAmelCase = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Dict , lowercase_ :str , lowercase_ :Optional[Any] , lowercase_ :Union[str, Any] , lowercase_ :List[Any] , lowercase_ :Optional[Any] , lowercase_ :Any=None ) -> Any:
if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}""" )
UpperCAmelCase = image.to(device=lowercase_ , dtype=lowercase_ )
UpperCAmelCase = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCAmelCase = image
else:
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase_ )
]
UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
else:
UpperCAmelCase = self.movq.encode(lowercase_ ).latent_dist.sample(lowercase_ )
UpperCAmelCase = self.movq.config.scaling_factor * init_latents
UpperCAmelCase = torch.cat([init_latents] , dim=0 )
UpperCAmelCase = init_latents.shape
UpperCAmelCase = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
# get latents
UpperCAmelCase = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase = init_latents
return latents
def UpperCAmelCase__ ( self :int , lowercase_ :int=0 ) -> List[str]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" )
UpperCAmelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :str=0 ) -> Dict:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=lowercase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase , UpperCAmelCase = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
# We'll offload the last model manually.
UpperCAmelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase__ ( self :List[Any] ) -> Dict:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase_ )
def __call__( self :str , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :int = 5_12 , lowercase_ :int = 5_12 , lowercase_ :int = 1_00 , lowercase_ :float = 4.0 , lowercase_ :float = 0.3 , lowercase_ :int = 1 , lowercase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ :Optional[str] = "pil" , lowercase_ :bool = True , ) -> List[str]:
UpperCAmelCase = self._execution_device
UpperCAmelCase = guidance_scale > 1.0
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
UpperCAmelCase = image_embeds.shape[0]
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = torch.cat(lowercase_ , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase = image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
if not isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = [image]
if not all(isinstance(lowercase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(lowercase_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
UpperCAmelCase = torch.cat([prepare_image(lowercase_ , lowercase_ , lowercase_ ) for i in image] , dim=0 )
UpperCAmelCase = image.to(dtype=image_embeds.dtype , device=lowercase_ )
UpperCAmelCase = self.movq.encode(lowercase_ )['latents']
UpperCAmelCase = latents.repeat_interleave(lowercase_ , dim=0 )
self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
UpperCAmelCase , UpperCAmelCase = self.get_timesteps(lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCAmelCase , UpperCAmelCase = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
UpperCAmelCase = self.prepare_latents(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , image_embeds.dtype , lowercase_ , lowercase_ )
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase = {'image_embeds': image_embeds}
UpperCAmelCase = self.unet(
sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase , UpperCAmelCase = noise_pred.chunk(2 )
UpperCAmelCase , UpperCAmelCase = variance_pred.chunk(2 )
UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
# post-processing
UpperCAmelCase = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCAmelCase = image * 0.5 + 0.5
UpperCAmelCase = image.clamp(0 , 1 )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
| 78 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    """Configuration for a timm backbone wrapped for use with transformers."""

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 297 |
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 297 | 1 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
# Format the message.
if name is None:
lowerCAmelCase = None
else:
lowerCAmelCase = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}'
lowerCAmelCase = fmt.format(lowerCamelCase )
# Print and recurse (if needed).
if isinstance(lowerCamelCase , lowerCamelCase ):
if msg is not None:
print(lowerCamelCase )
for k in val.keys():
recursive_print(lowerCamelCase , val[k] , spaces + 2 )
elif isinstance(lowerCamelCase , torch.Tensor ):
print(lowerCamelCase , ':' , val.size() )
else:
print(lowerCamelCase , ':' , lowerCamelCase )
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
lowerCAmelCase = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
lowerCAmelCase = (num_heads, hidden_size, num_splits) + input_shape[1:]
lowerCAmelCase = param.view(*lowerCamelCase )
lowerCAmelCase = param.transpose(0 , 2 )
lowerCAmelCase = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
lowerCAmelCase = (num_heads, num_splits, hidden_size) + input_shape[1:]
lowerCAmelCase = param.view(*lowerCamelCase )
lowerCAmelCase = param.transpose(0 , 1 ).contiguous()
lowerCAmelCase = param.view(*lowerCamelCase )
return param
def convert_megatron_checkpoint(args, input_state_dict, config):
# The converted output model.
lowerCAmelCase = {}
# old versions did not store training args
lowerCAmelCase = input_state_dict.get('args' , lowerCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowerCAmelCase = ds_args.padded_vocab_size
lowerCAmelCase = ds_args.max_position_embeddings
lowerCAmelCase = ds_args.hidden_size
lowerCAmelCase = ds_args.num_layers
lowerCAmelCase = ds_args.num_attention_heads
lowerCAmelCase = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowerCAmelCase = config.n_head
# The hidden_size per head.
lowerCAmelCase = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowerCAmelCase = input_state_dict['checkpoint_version']
else:
lowerCAmelCase = 0.0
# The model.
lowerCAmelCase = input_state_dict['model']
# The language model.
lowerCAmelCase = model['language_model']
# The embeddings.
lowerCAmelCase = lm['embedding']
# The word embeddings.
lowerCAmelCase = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
lowerCAmelCase = word_embeddings[: config.vocab_size, :]
lowerCAmelCase = word_embeddings
# The position embeddings.
lowerCAmelCase = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowerCAmelCase = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
lowerCAmelCase = pos_embeddings
# The transformer.
lowerCAmelCase = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
lowerCAmelCase = re.compile(R'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
lowerCAmelCase = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowerCAmelCase = layer_re.match(lowerCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowerCAmelCase = int(m.group(1 ) )
# The name of the operation.
lowerCAmelCase = m.group(2 )
# Is it a weight or a bias?
lowerCAmelCase = m.group(3 )
# The name of the layer.
lowerCAmelCase = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
lowerCAmelCase = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
lowerCAmelCase = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowerCAmelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , lowerCamelCase , lowerCamelCase )
lowerCAmelCase = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowerCAmelCase = torch.tensor(-1e4 , dtype=torch.floataa )
lowerCAmelCase = masked_bias
lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowerCAmelCase = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowerCAmelCase = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase )
# Store. No change of shape.
lowerCAmelCase = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowerCAmelCase = transformer['final_layernorm.weight']
lowerCAmelCase = transformer['final_layernorm.bias']
# For LM head, transformers' wants the matrix to weight embeddings.
lowerCAmelCase = word_embeddings
# It should be done!
return output_state_dict
def main():
# Create the argument parser.
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=lowerCamelCase , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=lowerCamelCase , help='An optional config json file describing the pre-trained model.' , )
lowerCAmelCase = parser.parse_args()
# Extract the basename.
lowerCAmelCase = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
lowerCAmelCase = torch.load(lowerCamelCase , map_location='cpu' )
else:
lowerCAmelCase = torch.load(args.path_to_checkpoint , map_location='cpu' )
lowerCAmelCase = input_state_dict.get('args' , lowerCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowerCAmelCase = 'gelu_fast'
elif ds_args.openai_gelu:
lowerCAmelCase = 'gelu_new'
else:
lowerCAmelCase = 'gelu'
else:
# in the very early days this used to be "gelu_new"
lowerCAmelCase = 'gelu_new'
# Spell out all parameters in case the defaults change.
lowerCAmelCase = GPTaConfig(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=lowerCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=lowerCamelCase , summary_activation=lowerCamelCase , summary_proj_to_labels=lowerCamelCase , summary_first_dropout=0.1 , scale_attn_weights=lowerCamelCase , use_cache=lowerCamelCase , bos_token_id=50256 , eos_token_id=50256 , )
else:
lowerCAmelCase = GPTaConfig.from_json_file(args.config_file )
lowerCAmelCase = ['GPT2LMHeadModel']
# Convert.
print('Converting' )
lowerCAmelCase = convert_megatron_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(lowerCamelCase , lowerCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
lowerCAmelCase = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowerCAmelCase = 'gpt2'
elif tokenizer_type == "PretrainedFromHF":
lowerCAmelCase = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
lowerCAmelCase = 'gpt2'
lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase )
lowerCAmelCase = type(lowerCamelCase ).__name__
lowerCAmelCase = tokenizer_class
# Store the config to file.
print('Saving config' )
config.save_pretrained(lowerCamelCase )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(lowerCamelCase )
# Store the state_dict to file.
lowerCAmelCase = os.path.join(lowerCamelCase , 'pytorch_model.bin' )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(lowerCamelCase , lowerCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
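# Typical invocations of the conversion script above (paths are illustrative):
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure /path/to/checkpoint.zip
#   python convert_megatron_gpt2_checkpoint.py --config_file my_config.json /path/to/model_optim_rng.pt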
| 4 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : int = logging.get_logger(__name__)
A_ : Optional[Any] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: List[Any] = '''mgp-str'''
def __init__( self , A__=[32, 128] , A__=4 , A__=3 , A__=27 , A__=38 , A__=5_0257 , A__=3_0522 , A__=768 , A__=12 , A__=12 , A__=4.0 , A__=True , A__=False , A__=1e-5 , A__=0.0 , A__=0.0 , A__=0.0 , A__=False , A__=0.0_2 , **A__ , ):
super().__init__(**A__ )
A__ : Dict = image_size
A__ : int = patch_size
A__ : Dict = num_channels
A__ : List[Any] = max_token_length
A__ : str = num_character_labels
A__ : Tuple = num_bpe_labels
A__ : Optional[Any] = num_wordpiece_labels
A__ : Optional[int] = hidden_size
A__ : Tuple = num_hidden_layers
A__ : Any = num_attention_heads
A__ : List[Any] = mlp_ratio
A__ : Tuple = distilled
A__ : Union[str, Any] = layer_norm_eps
A__ : Tuple = drop_rate
A__ : List[str] = qkv_bias
A__ : Optional[Any] = attn_drop_rate
A__ : Union[str, Any] = drop_path_rate
A__ : Optional[Any] = output_aa_attentions
A__ : Optional[int] = initializer_range
| 192 | 0 |
"""simple docstring"""
import pprint
import requests
a__ : List[str] = '''https://zenquotes.io/api'''
def UpperCAmelCase__ ():
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + "/today" ).json()
def UpperCAmelCase__ ():
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + "/random" ).json()
if __name__ == "__main__":
a__ : Optional[Any] = random_quotes()
pprint.pprint(response)
| 195 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
print(f"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(lowerCAmelCase_ ):
print(f"""{i}\t\t{d}""" )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
for j in range(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
return True
return False
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [float("inf" )] * vertex_count
__SCREAMING_SNAKE_CASE = 0.0
for _ in range(vertex_count - 1 ):
for j in range(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
__SCREAMING_SNAKE_CASE = distance[u] + w
__SCREAMING_SNAKE_CASE = check_negative_cycle(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if negative_cycle_exists:
raise Exception("Negative cycle found" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : Union[str, Any] = int(input('''Enter number of vertices: ''').strip())
a__ : Any = int(input('''Enter number of edges: ''').strip())
a__ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
a__ , a__ , a__ : str = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
a__ : str = {'''src''': src, '''dst''': dest, '''weight''': weight}
a__ : str = int(input('''\nEnter shortest path source:''').strip())
a__ : List[Any] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
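# Programmatic example (added illustration, no stdin required):
example_edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 1},
]
assert bellman_ford(example_edges, vertex_count=3, edge_count=3, src=0) == [0.0, 2.0, 1.0]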
| 195 | 1 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a_ : Tuple = logging.get_logger(__name__)
class a ( __snake_case ):
_lowerCAmelCase = ["""input_features""", """is_longer"""]
def __init__( self , __magic_name__=64 , __magic_name__=4_80_00 , __magic_name__=4_80 , __magic_name__=10 , __magic_name__=10_24 , __magic_name__=0.0 , __magic_name__=False , __magic_name__ = 0 , __magic_name__ = 1_40_00 , __magic_name__ = None , __magic_name__ = "fusion" , __magic_name__ = "repeatpad" , **__magic_name__ , ) -> Union[str, Any]:
super().__init__(
feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
_a = top_db
_a = truncation
_a = padding
_a = fft_window_size
_a = (fft_window_size >> 1) + 1
_a = hop_length
_a = max_length_s
_a = max_length_s * sampling_rate
_a = sampling_rate
_a = frequency_min
_a = frequency_max
_a = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=__SCREAMING_SNAKE_CASE , max_frequency=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , norm=__SCREAMING_SNAKE_CASE , mel_scale='htk' , )
_a = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=__SCREAMING_SNAKE_CASE , max_frequency=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , norm='slaney' , mel_scale='slaney' , )
def __UpperCAmelCase ( self ) -> Dict[str, Any]:
_a = copy.deepcopy(self.__dict__ )
_a = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = None ) -> np.ndarray:
_a = spectrogram(
__SCREAMING_SNAKE_CASE , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=__SCREAMING_SNAKE_CASE , log_mel='dB' , )
return log_mel_spectrogram.T
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
_a = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_a = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_a = [0]
# randomly choose index for each part
_a = np.random.choice(ranges[0] )
_a = np.random.choice(ranges[1] )
_a = np.random.choice(ranges[2] )
_a = mel[idx_front : idx_front + chunk_frames, :]
_a = mel[idx_middle : idx_middle + chunk_frames, :]
_a = mel[idx_back : idx_back + chunk_frames, :]
_a = torch.tensor(mel[None, None, :] )
_a = torch.nn.functional.interpolate(
__SCREAMING_SNAKE_CASE , size=[chunk_frames, 64] , mode='bilinear' , align_corners=__SCREAMING_SNAKE_CASE )
_a = mel_shrink[0][0].numpy()
_a = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_a = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_a = len(__SCREAMING_SNAKE_CASE ) - max_length
_a = np.random.randint(0 , overflow + 1 )
_a = waveform[idx : idx + max_length]
_a = self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_a = self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE , self.mel_filters )
_a = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_a = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_a = np.stack([mel, mel, mel, mel] , axis=0 )
_a = False
else:
_a = self._random_mel_fusion(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_a = True
else:
raise NotImplementedError(f'data_truncating {truncation} not implemented' )
else:
_a = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_a = int(max_length / len(__SCREAMING_SNAKE_CASE ) )
_a = np.stack(np.tile(__SCREAMING_SNAKE_CASE , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_a = int(max_length / len(__SCREAMING_SNAKE_CASE ) )
_a = np.stack(np.tile(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
_a = np.pad(__SCREAMING_SNAKE_CASE , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_a = self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE , self.mel_filters )
_a = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_a = self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ) -> BatchFeature:
_a = truncation if truncation is not None else self.truncation
_a = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_a = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_a = is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_a = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
_a = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_a = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_a = [np.asarray(__SCREAMING_SNAKE_CASE )]
# convert to mel spectrogram, truncate and pad if needed.
_a = [
self._get_input_mel(__SCREAMING_SNAKE_CASE , max_length if max_length else self.nb_max_samples , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for waveform in raw_speech
]
_a = []
_a = []
for mel, longer in padded_inputs:
input_mel.append(__SCREAMING_SNAKE_CASE )
is_longer.append(__SCREAMING_SNAKE_CASE )
if truncation == "fusion" and sum(__SCREAMING_SNAKE_CASE ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_a = np.random.randint(0 , len(__SCREAMING_SNAKE_CASE ) )
_a = True
if isinstance(input_mel[0] , __SCREAMING_SNAKE_CASE ):
_a = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_a = [[longer] for longer in is_longer]
_a = {'input_features': input_mel, 'is_longer': is_longer}
_a = BatchFeature(__SCREAMING_SNAKE_CASE )
if return_tensors is not None:
_a = input_features.convert_to_tensors(__SCREAMING_SNAKE_CASE )
return input_features
| 168 |
"""simple docstring"""
import argparse
import json
import subprocess
def lowerCamelCase__ ( _lowerCamelCase : Tuple , _lowerCamelCase : str ) -> List[Any]:
lowerCamelCase_ = []
lowerCamelCase_ = (
F'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'''
' https://api.github.com/repos/huggingface/transformers/actions/runners'
)
lowerCamelCase_ = subprocess.run(_lowerCamelCase , shell=_lowerCamelCase , stdout=subprocess.PIPE )
lowerCamelCase_ = output.stdout.decode('utf-8' )
lowerCamelCase_ = json.loads(_lowerCamelCase )
lowerCamelCase_ = status['runners']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_lowerCamelCase )
# save the result so we can report them on Slack
with open('offline_runners.txt' , 'w' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
if len(_lowerCamelCase ) > 0:
lowerCamelCase_ = '\n'.join([x['name'] for x in offline_runners] )
raise ValueError(F'''The following runners are offline:\n{failed}''' )
if __name__ == "__main__":
def lowerCamelCase__ ( _lowerCamelCase : Dict ) -> Tuple:
return values.split(',' )
_SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
_SCREAMING_SNAKE_CASE : Any = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 183 | 0 |
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
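# Round-trip example (added illustration):
bwt_result = bwt_transform("^BANANA")
assert bwt_result["bwt_string"] == "BNN^AAA"
assert reverse_bwt(bwt_result["bwt_string"], bwt_result["idx_original_string"]) == "^BANANA"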
| 371 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCamelCase__ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
lowerCamelCase__ = {'''facebook/blenderbot_small-90M''': 512}
def lowerCAmelCase__ ( a__ ) ->Any:
'''simple docstring'''
_UpperCamelCase = set()
_UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCamelCase = char
_UpperCamelCase = set(a__ )
return pairs
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
def __init__( self : str , lowercase_ : Any , lowercase_ : int , lowercase_ : List[Any]="__start__" , lowercase_ : Optional[int]="__end__" , lowercase_ : List[Any]="__unk__" , lowercase_ : List[str]="__null__" , **lowercase_ : Optional[int] , ) -> List[Any]:
"""simple docstring"""
super().__init__(unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , **lowercase_)
with open(lowercase_ , encoding="utf-8") as vocab_handle:
_UpperCamelCase = json.load(lowercase_)
_UpperCamelCase = {v: k for k, v in self.encoder.items()}
with open(lowercase_ , encoding="utf-8") as merges_handle:
_UpperCamelCase = merges_handle.read().split("\n")[1:-1]
_UpperCamelCase = [tuple(merge.split()) for merge in merges]
_UpperCamelCase = dict(zip(lowercase_ , range(len(lowercase_))))
_UpperCamelCase = {}
@property
def __UpperCAmelCase ( self : List[str]) -> int:
"""simple docstring"""
return len(self.encoder)
def __UpperCAmelCase ( self : Tuple) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def __UpperCAmelCase ( self : Tuple , lowercase_ : str) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_UpperCamelCase = re.sub("([.,!?()])" , R" \1" , lowercase_)
_UpperCamelCase = re.sub("(')" , R" \1 " , lowercase_)
_UpperCamelCase = re.sub(R"\s{2,}" , " " , lowercase_)
if "\n" in token:
_UpperCamelCase = token.replace("\n" , " __newln__")
_UpperCamelCase = token.split(" ")
_UpperCamelCase = []
for token in tokens:
if not len(lowercase_):
continue
_UpperCamelCase = token.lower()
_UpperCamelCase = tuple(lowercase_)
_UpperCamelCase = tuple(list(word[:-1]) + [word[-1] + "</w>"])
_UpperCamelCase = get_pairs(lowercase_)
if not pairs:
words.append(lowercase_)
continue
while True:
_UpperCamelCase = min(lowercase_ , key=lambda lowercase_: self.bpe_ranks.get(lowercase_ , float("inf")))
if bigram not in self.bpe_ranks:
break
_UpperCamelCase , _UpperCamelCase = bigram
_UpperCamelCase = []
_UpperCamelCase = 0
while i < len(lowercase_):
try:
_UpperCamelCase = word.index(lowercase_ , lowercase_)
new_word.extend(word[i:j])
_UpperCamelCase = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(lowercase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_UpperCamelCase = tuple(lowercase_)
_UpperCamelCase = new_word
if len(lowercase_) == 1:
break
else:
_UpperCamelCase = get_pairs(lowercase_)
_UpperCamelCase = "@@ ".join(lowercase_)
_UpperCamelCase = word[:-4]
_UpperCamelCase = word
words.append(lowercase_)
return " ".join(lowercase_)
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : str) -> List[str]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = re.findall(R"\S+\n?" , lowercase_)
for token in words:
split_tokens.extend(list(self.bpe(lowercase_).split(" ")))
return split_tokens
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : str) -> int:
"""simple docstring"""
_UpperCamelCase = token.lower()
return self.encoder.get(lowercase_ , self.encoder.get(self.unk_token))
def __UpperCAmelCase ( self : Any , lowercase_ : int) -> str:
"""simple docstring"""
return self.decoder.get(lowercase_ , self.unk_token)
def __UpperCAmelCase ( self : Any , lowercase_ : List[str]) -> str:
"""simple docstring"""
_UpperCamelCase = " ".join(lowercase_).replace("@@ " , "").strip()
return out_string
def __UpperCAmelCase ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowercase_):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
_UpperCamelCase = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
_UpperCamelCase = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(lowercase_ , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_) + "\n")
_UpperCamelCase = 0
with open(lowercase_ , "w" , encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!")
_UpperCamelCase = token_index
writer.write(" ".join(lowercase_) + "\n")
index += 1
return vocab_file, merge_file
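# Usage sketch; this assumes the obfuscated class above is transformers'
# BlenderbotSmallTokenizer (its vocab URLs point at facebook/blenderbot_small-90M):
from transformers import BlenderbotSmallTokenizer

tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
token_ids = tokenizer("Sam Spade")["input_ids"]
print(tokenizer.decode(token_ids))  # input is lowercased before BPE, so: "sam spade"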
| 63 | 0 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list:
'''simple docstring'''
lowercase_ = len(__lowerCAmelCase )
lowercase_ = [[0] * n for i in range(__lowerCAmelCase )]
for i in range(__lowerCAmelCase ):
lowercase_ = y_points[i]
for i in range(2 , __lowerCAmelCase ):
for j in range(__lowerCAmelCase , __lowerCAmelCase ):
lowercase_ = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
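# Example (added illustration): the quadratic through (1, 1), (2, 4), (3, 9) at 2.5.
# Note the table only draws on points 1..n-1, so a dummy 0th point is prepended.
interpolated_value, _table = neville_interpolate([0, 1, 2, 3], [0, 1, 4, 9], 2.5)
assert interpolated_value == 6.25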
| 136 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Tuple = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_plbart"] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 136 | 1 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
englishLetterFreq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
_UpperCAmelCase = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
_UpperCAmelCase = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count ( message : str ):
    '''simple docstring'''
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def get_item_at_index_zero ( x : tuple ):
    '''simple docstring'''
    return x[0]
def get_frequency_order ( message : str ):
    '''simple docstring'''
    letter_to_freq = get_letter_count(message )
    freq_to_letter : dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str : dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find ,reverse=True )
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero ,reverse=True )
    freq_order : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )
def english_freq_match_score ( message : str ):
    '''simple docstring'''
    freq_order = get_frequency_order(message )
    match_score = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
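# Hedged usage sketch (not part of the original script): a score close to the
# maximum of 12 (six common plus six uncommon letters matched) suggests the
# text's letter frequencies look like English; shuffled ciphertext scores lower.
#
# sample = "Alan Mathison Turing was an English mathematician and computer scientist."
# print(get_frequency_order(sample))        # most frequent letters first
# print(english_freq_match_score(sample))   # expected: near 12 for English prose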
| 192 |
from math import isqrt
def is_prime ( number : int ):
    '''simple docstring'''
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )
def solution ( max_prime : int = 10**6 ):
    '''simple docstring'''
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 192 | 1 |
'''simple docstring'''
__version__ = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
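# Generic shape of the guard blocks used throughout this __init__ (sketch with
# placeholder names is_some_backend_available / dummy_objects, shown only for
# illustration):
#
# try:
#     if not is_some_backend_available():
#         raise OptionalDependencyNotAvailable()
# except OptionalDependencyNotAvailable:
#     from .utils.dummy_objects import *  # noqa F403 -- stand-ins that raise on use
# else:
#     from .real_module import RealClass  # imported only when the backend exists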
| 3 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping ( key , file ) -> str:
    layer_rename_map = {
        'word_embeddings.weight': 'word_embeddings.weight',
        'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
        'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
        'weight': 'ln_f.weight',
        'bias': 'ln_f.bias',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(r'.*layer_(\d*).*' , key )[1] )
    layer_number -= 3
    return F"""h.{layer_number}.""" + key
def get_dtype_size ( dtype ):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r'[^\d](\d+)$' , str(dtype ) )
    if bit_search is None:
        raise ValueError(F"""`dtype` is not a valid dtype: {dtype}.""" )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch ( bloom_checkpoint_path , bloom_config_file , pytorch_dump_folder_path , shard_model , pretraining_tp ):
# Construct model
if bloom_config_file == "":
snake_case__ : Union[str, Any] = BloomConfig()
else:
snake_case__ : int = BloomConfig.from_json_file(A__ )
if shard_model:
snake_case__ : Tuple = os.listdir(A__ )
snake_case__ : str = sorted(filter(lambda A__ : s.startswith('layer' ) and "model_00" in s , A__ ) )
snake_case__ : str = {'weight_map': {}, 'metadata': {}}
snake_case__ : Optional[int] = 0
snake_case__ : Tuple = None
snake_case__ : Any = BloomConfig()
for j, file in enumerate(A__ ):
print('Processing file: {}'.format(A__ ) )
snake_case__ : str = None
for i in range(A__ ):
# load all TP files
snake_case__ : Optional[int] = file.replace('model_00' , F"""model_0{i}""" )
snake_case__ : int = torch.load(os.path.join(A__ , A__ ) , map_location='cpu' )
# Rename keys in the transformers names
snake_case__ : List[Any] = list(temp.keys() )
for key in keys:
snake_case__ : List[Any] = temp.pop(A__ )
if tensors is None:
snake_case__ : Optional[Any] = temp
else:
for key in tensors.keys():
if any(key.endswith(A__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
snake_case__ : Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
snake_case__ : Optional[int] = torch.cat([tensors[key], temp[key]] , dim=A__ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(A__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
snake_case__ : Dict = tensors[key] / pretraining_tp
torch.save(
A__ , os.path.join(
A__ , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(A__ ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
snake_case__ : List[Any] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
snake_case__ : Optional[int] = 'pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ) , str(len(A__ ) ).zfill(5 ) )
snake_case__ : Dict = BloomConfig()
snake_case__ : str = pytorch_dump_folder_path + '/' + CONFIG_NAME
snake_case__ : int = total_size
with open(A__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(A__ , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
snake_case__ : List[str] = json.dumps(A__ , indent=2 , sort_keys=A__ ) + '\n'
f.write(A__ )
else:
snake_case__ : int = BloomModel(A__ )
snake_case__ : Dict = os.listdir(A__ )
snake_case__ : Union[str, Any] = sorted(filter(lambda A__ : s.startswith('layer' ) and "model_00" in s , A__ ) )
snake_case__ : List[str] = None
for i, file in enumerate(A__ ):
snake_case__ : Dict = None
for i in range(A__ ):
# load all TP files
snake_case__ : List[Any] = file.replace('model_00' , F"""model_0{i}""" )
snake_case__ : int = torch.load(os.path.join(A__ , A__ ) , map_location='cpu' )
# Rename keys in the transformers names
snake_case__ : List[str] = list(temp.keys() )
for key in keys:
snake_case__ : Any = temp.pop(A__ )
if tensors is None:
snake_case__ : Union[str, Any] = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(A__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
snake_case__ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
snake_case__ : Optional[Any] = torch.cat([tensors[key], temp[key]] , dim=A__ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(A__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
snake_case__ : Optional[int] = tensors[key] / pretraining_tp
snake_case__ : int = model.load_state_dict(A__ , strict=A__ )
assert not other_keys.unexpected_keys, F"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
snake_case__ : List[Any] = set(other_keys.missing_keys )
else:
snake_case__ : Tuple = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(A__ , exist_ok=A__ )
snake_case__ : Any = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
snake_case__ : List[Any] = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
snake_case__ : str = model.to(config.torch_dtype )
torch.save(model.state_dict() , A__ )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(A__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
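# Example invocation (illustrative; the paths are placeholders and the script
# file name is assumed to be whatever this module is saved under):
#
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /path/to/megatron_checkpoint \
#       --pytorch_dump_folder_path /path/to/output \
#       --shard_model \
#       --pretraining_tp 4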
| 143 | 0 |
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser ( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("""tpu-config""" ,description=_description )
    else:
        parser = argparse.ArgumentParser("""Accelerate tpu-config command""" ,description=_description )
# Core arguments
    config_args = parser.add_argument_group(
        """Config Arguments""" ,"""Arguments that can be configured through `accelerate config`.""" )
    config_args.add_argument(
        """--config_file""" ,type=str ,default=None ,help="""Path to the config file to use for accelerate.""" ,)
    config_args.add_argument(
        """--tpu_name""" ,default=None ,help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" ,)
    config_args.add_argument(
        """--tpu_zone""" ,default=None ,help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" ,)
    pod_args = parser.add_argument_group("""TPU Arguments""" ,"""Arguments for options ran inside the TPU.""" )
pod_args.add_argument(
"""--use_alpha""" ,action="""store_true""" ,help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" ,)
    pod_args.add_argument(
        """--command_file""" ,default=None ,help="""The path to the file containing the commands to run on the pod on startup.""" ,)
pod_args.add_argument(
"""--command""" ,action="""append""" ,nargs="""+""" ,help="""A command to run on the pod. Can be passed multiple times.""" ,)
pod_args.add_argument(
"""--install_accelerate""" ,action="""store_true""" ,help="""Whether to install accelerate on the pod. Defaults to False.""" ,)
pod_args.add_argument(
"""--accelerate_version""" ,default="""latest""" ,help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.""" ,)
pod_args.add_argument(
"""--debug""" ,action="""store_true""" ,help="""If set, will print the command that would be run instead of running it.""" )
if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
return parser
def tpu_command_launcher ( args ):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = 'git+https://github.com/huggingface/accelerate.git'
    elif args.accelerate_version == "latest":
        args.accelerate_version = 'accelerate -U'
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = f"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
    if args.command_file:
        with open(args.command_file ,"""r""" ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] ,list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['cd /usr/share']
    if args.install_accelerate:
        new_cmd += [f"""pip install {args.accelerate_version}"""]
    new_cmd += args.command
    args.command = '; '.join(new_cmd )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
    if args.debug:
        print(f"""Running {" ".join(cmd )}""" )
        return
    subprocess.run(cmd )
print("""Successfully setup pod.""" )
def main ( ):
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
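# Example invocation (illustrative; flags exactly as defined by the parser above):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "python train.py" --install_accelerate --debug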
| 350 |
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests (PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
    def get_dummy_components ( self ) -> Any:
return self._get_dummy_components()
    def get_dummy_inputs ( self , device , seed=0 ) -> Optional[int]:
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase ( self ) -> Any:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def UpperCAmelCase ( self ) -> List[str]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def UpperCAmelCase ( self ) -> Dict:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def UpperCAmelCase ( self ) -> List[str]:
self._test_save_load_local()
def UpperCAmelCase ( self ) -> List[str]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class IFPipelineSlowTests (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> List[Any]:
# if
snake_case : Tuple = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
snake_case : Tuple = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=A , tokenizer=A )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
        prompt_embeds , negative_prompt_embeds = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
snake_case : List[str] = None
snake_case : List[Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(A , A , A , A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
        snake_case : Any = IFImg2ImgPipeline(**pipe_a.components )
        snake_case : Dict = IFImg2ImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_img2img(A , A , A , A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
snake_case : Optional[Any] = IFInpaintingPipeline(**pipe_a.components )
snake_case : Any = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(A , A , A , A )
    def _test_if ( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
# pipeline 1
_start_torch_memory_measurement()
snake_case : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case : Tuple = pipe_a(
prompt_embeds=A , negative_prompt_embeds=A , num_inference_steps=2 , generator=A , output_type="""np""" , )
snake_case : Optional[int] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
snake_case : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
snake_case : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(A , A )
# pipeline 2
_start_torch_memory_measurement()
snake_case : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A )
snake_case : str = pipe_a(
prompt_embeds=A , negative_prompt_embeds=A , image=A , generator=A , num_inference_steps=2 , output_type="""np""" , )
snake_case : str = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
snake_case : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
snake_case : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A , A )
    def _test_if_img2img ( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
# pipeline 1
_start_torch_memory_measurement()
snake_case : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A )
snake_case : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case : Union[str, Any] = pipe_a(
prompt_embeds=A , negative_prompt_embeds=A , image=A , num_inference_steps=2 , generator=A , output_type="""np""" , )
snake_case : Optional[int] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
snake_case : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
snake_case : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(A , A )
# pipeline 2
_start_torch_memory_measurement()
snake_case : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case : int = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(A )
snake_case : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A )
snake_case : int = pipe_a(
prompt_embeds=A , negative_prompt_embeds=A , image=A , original_image=A , generator=A , num_inference_steps=2 , output_type="""np""" , )
snake_case : List[Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
snake_case : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
snake_case : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A , A )
    def _test_if_inpainting ( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
# pipeline 1
_start_torch_memory_measurement()
snake_case : List[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A )
snake_case : Union[str, Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(A )
snake_case : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case : Tuple = pipe_a(
prompt_embeds=A , negative_prompt_embeds=A , image=A , mask_image=A , num_inference_steps=2 , generator=A , output_type="""np""" , )
snake_case : Tuple = output.images[0]
assert image.shape == (6_4, 6_4, 3)
snake_case : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
snake_case : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(A , A )
# pipeline 2
_start_torch_memory_measurement()
snake_case : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case : int = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A )
snake_case : Any = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(A )
snake_case : str = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(A )
snake_case : List[str] = pipe_a(
prompt_embeds=A , negative_prompt_embeds=A , image=A , mask_image=A , original_image=A , generator=A , num_inference_steps=2 , output_type="""np""" , )
snake_case : List[Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
snake_case : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
snake_case : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A , A )
def SCREAMING_SNAKE_CASE__ ( ) -> str:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 176 | 0 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
UpperCAmelCase_ : Any = '''\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n'''
UpperCAmelCase_ : List[Any] = '''\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'''
UpperCAmelCase_ : str = '''\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
    def _compute ( self : Optional[int] , predictions : List[Any] , references : List[Any] , max_order : int=4 , smooth : bool=False ):
        score = compute_bleu(
            reference_corpus=references , translation_corpus=predictions , max_order=max_order , smooth=smooth )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
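# Hedged sketch (illustrative): the underlying compute_bleu helper can be called
# directly with tokenized corpora; it returns the tuple unpacked in _compute
# above (bleu, precisions, bp, ratio, translation_length, reference_length).
#
# predictions = [["hello", "there", "general", "kenobi"]]
# references = [[["hello", "there", "general", "kenobi"]]]
# bleu, *rest = compute_bleu(reference_corpus=references, translation_corpus=predictions)
# print(bleu)  # 1.0 for an exact match, per the docstring example above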
| 38 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__a = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
    def __init__( self : List[str] , *args : str , **kwargs : List[str] ):
        warnings.warn(
            """The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use BeitImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
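# Migration sketch (illustrative): BeitImageProcessor is the drop-in
# replacement suggested by the warning above.
#
# from transformers import BeitImageProcessor
# image_processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")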
| 35 | 0 |
"""simple docstring"""
from math import isqrt, log2
def calculate_prime_numbers (max_number ) ->list[int]:
    """simple docstring"""
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution (base = 80_0800 , degree = 80_0800 ) ->int:
    """simple docstring"""
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
            > upper_bound
        ):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
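# Why the logarithm (explanatory note, not part of the original): a pair of
# primes (p, q) is counted when p**q * q**p <= base**degree. Taking log2 of
# both sides gives q*log2(p) + p*log2(q) <= degree*log2(base), which is exactly
# the upper_bound comparison in the inner while loop and avoids computing
# astronomically large integer powers.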
| 254 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester ( object ):
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        '''simple docstring'''
        return DebertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config( self ):
        '''simple docstring'''
        config = self.get_config()
        config.vocab_size = 3_0_0
        return config
    def check_loss_output( self , result ):
        '''simple docstring'''
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def create_and_check_deberta_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DebertaModel(config=config )
        model.to(torch_device )
        model.eval()
        sequence_output = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    def create_and_check_deberta_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DebertaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_deberta_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(result )
    def create_and_check_deberta_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_deberta_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DebertaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class DebertaModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fp16 = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = DebertaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaConfig , hidden_size=3_7 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_deberta_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs )
    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest ( unittest.TestCase ):
    """simple docstring"""
    @unittest.skip(reason='Model not available yet' )
    def test_inference_masked_lm( self ):
        '''simple docstring'''
        pass
    @slow
    def test_inference_no_head( self ):
        '''simple docstring'''
        model = DebertaModel.from_pretrained('microsoft/deberta-base' )
        input_ids = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) , F"{output[:, 1:4, 1:4]}" )
| 254 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : List[str] = {"""vocab_file""": """sentencepiece.model"""}
a_ : Union[str, Any] = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
}
a_ : str = {
"""google/rembert""": 256,
}
class RemBertTokenizer ( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=True , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        """simple docstring"""
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.sp_model )
    def get_vocab( self ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text , sample=False ):
        """simple docstring"""
        pieces = self.sp_model.EncodeAsPieces(text )
        return pieces
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = self.sp_model.decode_pieces(tokens )
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
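# Hedged usage sketch (not part of the original file; requires the
# sentencepiece model to be available locally or on the Hub):
#
# tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
# ids = tokenizer("Hello world")["input_ids"]
# print(tokenizer.convert_ids_to_tokens(ids))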
| 55 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCAmelCase__ : Tuple = logging.getLogger(__name__)
class NERTransformer ( BaseTransformer ):
"""simple docstring"""
UpperCAmelCase__ : List[str] ="""token-classification"""
    def __init__( self : Tuple , hparams : Namespace ) ->None:
        """simple docstring"""
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        module = import_module("""tasks""" )
        try:
            token_classification_task_clazz = getattr(module , hparams.task_type )
            self.token_classification_task : TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
        self.labels = self.token_classification_task.get_labels(hparams.labels )
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams , len(self.labels ) , self.mode )
    def forward( self : List[str] , **inputs : Optional[int] ) ->Any:
        """simple docstring"""
        return self.model(**inputs )
    def training_step( self : List[str] , batch : List[Any] , batch_num : str ) ->List[Any]:
        """simple docstring"""
        inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["""token_type_ids"""] = (
                batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            ) # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs )
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data( self : Optional[Any] ) ->None:
        """simple docstring"""
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode )
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                logger.info("""Loading features from cached file %s""" , cached_features_file )
                features = torch.load(cached_features_file )
            else:
                logger.info("""Creating features from dataset file at %s""" , args.data_dir )
                examples = self.token_classification_task.read_examples_from_file(args.data_dir , mode )
                features = self.token_classification_task.convert_examples_to_features(
                    examples , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info("""Saving features into cached file %s""" , cached_features_file )
                torch.save(features , cached_features_file )
    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )
    def validation_step(self, batch, batch_nb):
        """Compute validation"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        "Evaluation called for both validation and test."
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
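
# Hedged usage sketch for the trainer above. Only --task_type, --max_seq_length,
# --labels, --gpus and --overwrite_cache are defined in this file; the remaining
# flags are assumed to come from `add_generic_args`/`BaseTransformer`, and the
# paths are made up:
#
#   python run_ner.py \
#       --data_dir ./conll2003 \
#       --labels ./conll2003/labels.txt \
#       --model_name_or_path bert-base-cased \
#       --output_dir ./ner-output \
#       --max_seq_length 128 \
#       --task_type NER \
#       --do_train --do_predict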
| 245 | 0 |
"""Knuth-Morris-Pratt prefix function (a.k.a. failure function)."""


def prefix_function(input_string: str) -> list:
    """For every position i, the length of the longest proper prefix of
    input_string[: i + 1] that is also a suffix of it."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Length of the longest prefix that also occurs as a suffix in the string."""
    return max(prefix_function(input_string))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
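
# Usage check for the prefix function above:
#   "aabcdaabc" ->  a  a  b  c  d  a  a  b  c
#                  [0, 1, 0, 0, 0, 1, 2, 3, 4]
assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
assert longest_prefix("aabcdaabc") == 4  # "aabc" is both a prefix and a suffix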
| 352 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-TensorFlow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-TensorFlow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-TensorFlow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-TensorFlow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-TensorFlow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-TensorFlow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
UpperCamelCase_ : Tuple = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
UpperCamelCase_ : Any = parser.parse_args()
convert_tf_gptsan_to_pt(args)
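
# To make the qkv reshuffling above easier to follow, a toy NumPy check of the
# split-reshape-transpose pattern; the sizes are made up and do not correspond
# to any real GPTSAN checkpoint:
import numpy as np

hidden, num_heads, head_dim = 6, 2, 3
fused = np.zeros((hidden, 3, num_heads, head_dim), dtype=np.float32)  # hypothetical fused qkv kernel

state_q = fused[:, 0, :, :]  # (hidden, num_heads, head_dim)
w_q = state_q.reshape(state_q.shape[0], state_q.shape[1] * state_q.shape[2]).transpose(1, 0).copy()

print(w_q.shape)  # (6, 6): (num_heads * head_dim, hidden), the PyTorch q_proj.weight layout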
| 142 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
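
# The _LazyModule wiring above defers the heavy torch/TF imports until an
# attribute is first accessed. A minimal standalone sketch of the same idea
# using PEP 562's module-level __getattr__ (not transformers' actual
# implementation):
import importlib

_lazy_imports = {"configuration_swin": ["SwinConfig"]}


def __getattr__(name):
    for module_name, exported in _lazy_imports.items():
        if name in exported:
            submodule = importlib.import_module(f".{module_name}", __name__)
            return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")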
| 178 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 178 | 1 |
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
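
# Sanity checks for the recursion above; 73682 is the well-known answer for 200p
# (Project Euler 31). The plain recursion is exponential, so memoize (e.g. with
# functools.lru_cache on each helper) before trying much larger targets.
assert solution(5) == 4        # 5, 2+2+1, 2+1+1+1, 1+1+1+1+1
assert solution(200) == 73682  # ways to make £2 from standard UK coins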
| 358 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 286 | 0 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize the prompts and yield each task n_copies times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` that checks whether all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of code containing an EOF string."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
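

# Toy trace of remove_last_block: the capturing split keeps each EOF marker as
# its own element, so dropping the last two elements trims the unfinished block.
#   re.split("(\ndef|...)", "    return x\ndef next_function():\n    pass")
#   -> ['    return x', '\ndef', ' next_function():\n    pass']
#   "".join(parts[:-2]) -> '    return x'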
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task in the dataset, possibly across multiple GPUs."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
 | 312 |
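# Hedged, self-contained sketch of the `code_eval` metric used above; it
# executes untrusted generated code, hence the env-var opt-in. The toy
# references/predictions below are illustrative only.
import os

os.environ["HF_ALLOW_CODE_EVAL"] = "1"

from datasets import load_metric

code_eval = load_metric("code_eval")
pass_at_k, results = code_eval.compute(
    references=["assert add(2, 3) == 5"],
    predictions=[["def add(a, b):\n    return a + b", "def add(a, b):\n    return a - b"]],
    k=[1, 2],
)
print(pass_at_k)  # {'pass@1': 0.5, 'pass@2': 1.0}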
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_dedup, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_dedup), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
 | 312 | 1 |
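# For readers without the repo's `minhash_deduplication` module, an independent
# sketch of the same near-duplicate idea using the `datasketch` library; the
# whitespace shingling and the 0.85 threshold are illustrative choices, not the
# repo's own implementation:
from datasketch import MinHash, MinHashLSH


def make_minhash(text, num_perm=128):
    m = MinHash(num_perm=num_perm)
    for token in text.split():
        m.update(token.encode("utf-8"))
    return m


docs = {"a": "a " * 20, "b": "a " * 30, "c": "b " * 7}
lsh = MinHashLSH(threshold=0.85, num_perm=128)
for key, text in docs.items():
    lsh.insert(key, make_minhash(text))

print(lsh.query(make_minhash(docs["a"])))  # 'a' and 'b' share all shingles -> near-duplicates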
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
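
# Follow-up: tweepy's Cursor can replace the manual max_id bookkeeping above.
# A brief hedged sketch, reusing the same tweepy import and a v1.1 `api` object
# built exactly as in get_all_tweets (note the standard API only returns roughly
# the most recent 3200 tweets either way):
def get_all_tweets_cursor(api: tweepy.API, screen_name: str) -> list:
    return [
        [tweet.id_str, tweet.created_at, tweet.text]
        for tweet in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items()
    ]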
| 37 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : List[str] = "</s>"
__UpperCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
__UpperCAmelCase : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "</s>" )
self.assertEqual(vocab_keys[-1] , "v" )
self.assertEqual(len(UpperCAmelCase_ ) , 1_103 )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_103 )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : Tuple = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
__UpperCAmelCase : List[str] = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
__UpperCAmelCase : int = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
__UpperCAmelCase : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
__UpperCAmelCase : Tuple = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
__UpperCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
__UpperCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
__UpperCAmelCase : Dict = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96_103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_024
__UpperCAmelCase : Tuple = "To ensure a smooth flow of bank resolutions."
__UpperCAmelCase : str = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
__UpperCAmelCase : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = ["This is going to be way too long." * 150, "short example"]
__UpperCAmelCase : Optional[int] = ["not super long but more than 5 tokens", "tiny"]
__UpperCAmelCase : str = self._large_tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
__UpperCAmelCase : Union[str, Any] = self._large_tokenizer(
text_target=UpperCAmelCase_ , max_length=5 , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 1_024)
assert batch.attention_mask.shape == (2, 1_024)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCAmelCase_ ) == 2 # input_ids, attention_mask.
@slow
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
# fmt: off
__UpperCAmelCase : Tuple = {"input_ids": [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : List[str] = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
__UpperCAmelCase : str = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
__UpperCAmelCase : int = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_torch
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
__UpperCAmelCase : Any = ["This is going to be way too long." * 1_000, "short example"]
__UpperCAmelCase : List[Any] = ["not super long but more than 5 tokens", "tiny"]
__UpperCAmelCase : int = self._large_tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
__UpperCAmelCase : List[Any] = self._large_tokenizer(
text_target=UpperCAmelCase_ , max_length=5 , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 4_096)
assert batch.attention_mask.shape == (2, 4_096)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCAmelCase_ ) == 2 # input_ids, attention_mask.
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
__UpperCAmelCase : int = self._large_tokenizer(UpperCAmelCase_ ).input_ids
self.assertListEqual(
UpperCAmelCase_ , [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] , )
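
# The ID arithmetic asserted earlier in one place: Pegasus shifts sentencepiece
# ids up by `offset` to reserve room for pad/eos plus the mask and unk_token_N
# slots. Values below are copied from the assertions above, not re-derived.
offset = 103
pad_token_id, eos_token_id = 0, 1
unk_token_id = offset + 2  # == 105, matching tokenizer.unk_token_id above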
| 37 | 1 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
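
# A minimal sketch of how a connection-failure simulation like the one asserted
# above can be built with unittest.mock (datasets' real `offline` helper also
# implements the timeout and HF_DATASETS_OFFLINE modes); reuses the `requests`
# import from the top of this file:
from contextlib import contextmanager
from unittest.mock import patch


@contextmanager
def simulate_connection_fails():
    def raise_connection_error(*args, **kwargs):
        raise requests.exceptions.ConnectionError("Simulated offline mode")

    with patch("requests.Session.request", side_effect=raise_connection_error):
        yield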
| 195 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")
main()
| 195 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowerCAmelCase : Dict = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
__lowerCAmelCase : Union[str, Any] = _long_tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
__lowerCAmelCase : List[Any] = tf.not_equal(lowerCAmelCase , model.config.pad_token_id )
with tf.GradientTape():
__lowerCAmelCase : Optional[Any] = model(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase ).last_hidden_state
__lowerCAmelCase : str = (1, 11, 5_12)
self.assertEqual(output.shape , lowerCAmelCase )
__lowerCAmelCase : List[str] = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase , atol=4e-3 ) )
__lowerCAmelCase : Any = tf.function(lowerCAmelCase , jit_compile=lowerCAmelCase )
__lowerCAmelCase : Tuple = xla_generate(lowerCAmelCase , lowerCAmelCase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase , atol=4e-2 ) )
@require_tf
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
"""simple docstring"""
super().setUp()
__lowerCAmelCase : Tuple = """facebook/opt-350m"""
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = TFOPTForCausalLM.from_pretrained(self.path_model )
__lowerCAmelCase : Tuple = GPTaTokenizer.from_pretrained(self.path_model )
__lowerCAmelCase : int = [
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__lowerCAmelCase : List[Any] = tokenizer(lowerCAmelCase , return_tensors="""tf""" , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__lowerCAmelCase : Union[str, Any] = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-4 ) )
__lowerCAmelCase : Dict = tf.function(lowerCAmelCase , jit_compile=lowerCAmelCase )
__lowerCAmelCase : List[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-4 ) )
@require_tf
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
"""simple docstring"""
__lowerCAmelCase : str = """facebook/opt-125m"""
__lowerCAmelCase : List[str] = [
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
__lowerCAmelCase : str = []
        __lowerCAmelCase : List[Any] = GPT2Tokenizer.from_pretrained(lowerCAmelCase )
__lowerCAmelCase : Tuple = TFOPTForCausalLM.from_pretrained(lowerCAmelCase )
for prompt in self.prompts:
__lowerCAmelCase : List[Any] = tokenizer(lowerCAmelCase , return_tensors="""tf""" ).input_ids
__lowerCAmelCase : str = model.generate(lowerCAmelCase , max_length=10 )
__lowerCAmelCase : Dict = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : int = """facebook/opt-350m"""
        __lowerCAmelCase : int = GPT2Tokenizer.from_pretrained(lowerCAmelCase )
__lowerCAmelCase : Tuple = TFOPTForCausalLM.from_pretrained(lowerCAmelCase )
__lowerCAmelCase : str = """left"""
# use different length sentences to test batching
__lowerCAmelCase : Any = [
"""Hello, my dog is a little""",
"""Today, I""",
]
__lowerCAmelCase : int = tokenizer(lowerCAmelCase , return_tensors="""tf""" , padding=lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = inputs["""input_ids"""]
__lowerCAmelCase : Any = model.generate(input_ids=lowerCAmelCase , attention_mask=inputs["""attention_mask"""] )
__lowerCAmelCase : List[Any] = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
__lowerCAmelCase : Tuple = model.generate(input_ids=lowerCAmelCase )
        __lowerCAmelCase : Any = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["""attention_mask"""][-1] , tf.int32 ) )
__lowerCAmelCase : List[Any] = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
__lowerCAmelCase : Optional[int] = model.generate(input_ids=lowerCAmelCase , max_length=model.config.max_length - num_paddings )
__lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase )
__lowerCAmelCase : str = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase )
__lowerCAmelCase : Optional[int] = [
"""Hello, my dog is a little bit of a dork.\nI'm a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , [non_padded_sentence, padded_sentence] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = """facebook/opt-350m"""
__lowerCAmelCase : Dict = [
"""Today is a beautiful day and I want to""",
"""In the city of San Francisco, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
__lowerCAmelCase : Any = []
        __lowerCAmelCase : Optional[Any] = GPT2Tokenizer.from_pretrained(lowerCAmelCase )
__lowerCAmelCase : int = TFOPTForCausalLM.from_pretrained(lowerCAmelCase )
for prompt in self.prompts:
__lowerCAmelCase : Any = tokenizer(lowerCAmelCase , return_tensors="""tf""" ).input_ids
__lowerCAmelCase : Union[str, Any] = model.generate(lowerCAmelCase , max_length=10 )
__lowerCAmelCase : Any = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
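# NOTE: a self-contained sketch (not part of the test class above) of the
# batched-generation pattern the padding test exercises: decoder-only models
# such as OPT need LEFT padding, otherwise pad tokens would sit between the
# prompt and the newly generated tokens. Model name and prompts are examples.
from transformers import GPT2Tokenizer, TFOPTForCausalLM

_sketch_tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-125m")
_sketch_tokenizer.padding_side = "left"  # critical for causal LMs
_sketch_model = TFOPTForCausalLM.from_pretrained("facebook/opt-125m")
_sketch_inputs = _sketch_tokenizer(
    ["Hello, my dog is a little", "Today, I"], return_tensors="tf", padding=True
)
_sketch_outputs = _sketch_model.generate(
    input_ids=_sketch_inputs["input_ids"], attention_mask=_sketch_inputs["attention_mask"]
)
print(_sketch_tokenizer.batch_decode(_sketch_outputs, skip_special_tokens=True))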
| 139 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def snake_case_ (__A : int ) -> List[Any]:
random.seed(__A )
np.random.seed(__A )
torch.manual_seed(__A )
torch.cuda.manual_seed_all(__A )
# ^^ safe to call this function even if cuda is not available
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase : Iterable[torch.nn.Parameter] , lowerCAmelCase : float = 0.9999 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : int = 0 , lowerCAmelCase : bool = False , lowerCAmelCase : Union[float, int] = 1.0 , lowerCAmelCase : Union[float, int] = 2 / 3 , lowerCAmelCase : Optional[Any] = None , lowerCAmelCase : Dict[str, Any] = None , **lowerCAmelCase : Any , ) -> List[str]:
"""simple docstring"""
if isinstance(lowerCAmelCase , torch.nn.Module ):
__lowerCAmelCase : Optional[int] = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , lowerCAmelCase , standard_warn=lowerCAmelCase , )
__lowerCAmelCase : int = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
__lowerCAmelCase : Optional[Any] = True
if kwargs.get("""max_value""" , lowerCAmelCase ) is not None:
__lowerCAmelCase : Optional[Any] = """The `max_value` argument is deprecated. Please use `decay` instead."""
deprecate("""max_value""" , """1.0.0""" , lowerCAmelCase , standard_warn=lowerCAmelCase )
__lowerCAmelCase : Dict = kwargs["""max_value"""]
if kwargs.get("""min_value""" , lowerCAmelCase ) is not None:
__lowerCAmelCase : Optional[int] = """The `min_value` argument is deprecated. Please use `min_decay` instead."""
deprecate("""min_value""" , """1.0.0""" , lowerCAmelCase , standard_warn=lowerCAmelCase )
__lowerCAmelCase : Any = kwargs["""min_value"""]
__lowerCAmelCase : Optional[Any] = list(lowerCAmelCase )
__lowerCAmelCase : Dict = [p.clone().detach() for p in parameters]
if kwargs.get("""device""" , lowerCAmelCase ) is not None:
__lowerCAmelCase : str = """The `device` argument is deprecated. Please use `to` instead."""
deprecate("""device""" , """1.0.0""" , lowerCAmelCase , standard_warn=lowerCAmelCase )
self.to(device=kwargs["""device"""] )
__lowerCAmelCase : List[Any] = None
__lowerCAmelCase : Any = decay
__lowerCAmelCase : Any = min_decay
__lowerCAmelCase : List[Any] = update_after_step
__lowerCAmelCase : Optional[Any] = use_ema_warmup
__lowerCAmelCase : Union[str, Any] = inv_gamma
__lowerCAmelCase : Union[str, Any] = power
__lowerCAmelCase : Optional[Any] = 0
__lowerCAmelCase : str = None # set in `step()`
__lowerCAmelCase : int = model_cls
__lowerCAmelCase : List[Any] = model_config
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : Dict ) -> "EMAModel":
"""simple docstring"""
__lowerCAmelCase ,__lowerCAmelCase : List[Any] = model_cls.load_config(lowerCAmelCase , return_unused_kwargs=lowerCAmelCase )
__lowerCAmelCase : Any = model_cls.from_pretrained(lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = cls(model.parameters() , model_cls=lowerCAmelCase , model_config=model.config )
ema_model.load_state_dict(lowerCAmelCase )
return ema_model
def SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase : Union[str, Any] ) -> int:
"""simple docstring"""
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
__lowerCAmelCase : Dict = self.model_cls.from_config(self.model_config )
__lowerCAmelCase : Optional[Any] = self.state_dict()
state_dict.pop("""shadow_params""" , lowerCAmelCase )
model.register_to_config(**lowerCAmelCase )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase : int ) -> float:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
__lowerCAmelCase : str = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
__lowerCAmelCase : Optional[int] = (1 + step) / (10 + step)
__lowerCAmelCase : Any = min(lowerCAmelCase , self.decay )
# make sure decay is not smaller than min_decay
__lowerCAmelCase : Union[str, Any] = max(lowerCAmelCase , self.min_decay )
return cur_decay_value
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase : Iterable[torch.nn.Parameter] ) -> Optional[int]:
"""simple docstring"""
if isinstance(lowerCAmelCase , torch.nn.Module ):
__lowerCAmelCase : int = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , lowerCAmelCase , standard_warn=lowerCAmelCase , )
__lowerCAmelCase : Any = parameters.parameters()
__lowerCAmelCase : Any = list(lowerCAmelCase )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
__lowerCAmelCase : Optional[Any] = self.get_decay(self.optimization_step )
__lowerCAmelCase : Dict = decay
__lowerCAmelCase : Tuple = 1 - decay
__lowerCAmelCase : str = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCAmelCase ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
__lowerCAmelCase : List[Any] = deepspeed.zero.GatheredParameters(lowerCAmelCase , modifier_rank=lowerCAmelCase )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase : Iterable[torch.nn.Parameter] ) -> None:
"""simple docstring"""
__lowerCAmelCase : Dict = list(lowerCAmelCase )
for s_param, param in zip(self.shadow_params , lowerCAmelCase ):
param.data.copy_(s_param.to(param.device ).data )
def SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase : Dict=None , lowerCAmelCase : Optional[Any]=None ) -> None:
"""simple docstring"""
__lowerCAmelCase : List[str] = [
p.to(device=lowerCAmelCase , dtype=lowerCAmelCase ) if p.is_floating_point() else p.to(device=lowerCAmelCase )
for p in self.shadow_params
]
def SCREAMING_SNAKE_CASE ( self : int ) -> dict:
"""simple docstring"""
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase : Iterable[torch.nn.Parameter] ) -> None:
"""simple docstring"""
__lowerCAmelCase : Dict = [param.detach().cpu().clone() for param in parameters]
def SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase : Iterable[torch.nn.Parameter] ) -> None:
"""simple docstring"""
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , lowerCAmelCase ):
param.data.copy_(c_param.data )
# Better memory-wise.
__lowerCAmelCase : Union[str, Any] = None
def SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase : dict ) -> None:
"""simple docstring"""
__lowerCAmelCase : List[str] = copy.deepcopy(lowerCAmelCase )
__lowerCAmelCase : int = state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
__lowerCAmelCase : Tuple = state_dict.get("""min_decay""" , self.min_decay )
if not isinstance(self.min_decay , lowerCAmelCase ):
raise ValueError("""Invalid min_decay""" )
__lowerCAmelCase : List[Any] = state_dict.get("""optimization_step""" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCAmelCase ):
raise ValueError("""Invalid optimization_step""" )
__lowerCAmelCase : Union[str, Any] = state_dict.get("""update_after_step""" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCAmelCase ):
raise ValueError("""Invalid update_after_step""" )
__lowerCAmelCase : List[str] = state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCAmelCase ):
raise ValueError("""Invalid use_ema_warmup""" )
__lowerCAmelCase : Optional[int] = state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
__lowerCAmelCase : Any = state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
__lowerCAmelCase : Optional[Any] = state_dict.get("""shadow_params""" , lowerCAmelCase )
if shadow_params is not None:
__lowerCAmelCase : int = shadow_params
if not isinstance(self.shadow_params , lowerCAmelCase ):
raise ValueError("""shadow_params must be a list""" )
if not all(isinstance(lowerCAmelCase , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
| 139 | 1 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class UpperCAmelCase__ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : Optional[NestedDataStructureLike[PathLike]] = None , __lowerCamelCase : Optional[NamedSplit] = None , __lowerCamelCase : Optional[Features] = None , __lowerCamelCase : str = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[int] = None , **__lowerCamelCase : int , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = path_or_paths
SCREAMING_SNAKE_CASE__ = split if split or isinstance(__a , __a ) else '''train'''
SCREAMING_SNAKE_CASE__ = features
SCREAMING_SNAKE_CASE__ = cache_dir
SCREAMING_SNAKE_CASE__ = keep_in_memory
SCREAMING_SNAKE_CASE__ = streaming
SCREAMING_SNAKE_CASE__ = num_proc
SCREAMING_SNAKE_CASE__ = kwargs
@abstractmethod
def lowercase_ ( self : Union[str, Any] ) -> Optional[Any]:
pass
class UpperCAmelCase__ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : Optional[Features] = None , __lowerCamelCase : str = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[int] = None , **__lowerCamelCase : List[Any] , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = features
SCREAMING_SNAKE_CASE__ = cache_dir
SCREAMING_SNAKE_CASE__ = keep_in_memory
SCREAMING_SNAKE_CASE__ = streaming
SCREAMING_SNAKE_CASE__ = num_proc
SCREAMING_SNAKE_CASE__ = kwargs
@abstractmethod
def lowercase_ ( self : List[Any] ) -> Optional[int]:
pass
| 314 |
'''simple docstring'''
import math


class SelfOrganizingMap:
    """A two-cluster self-organizing map (Kohonen network)."""

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning cluster by squared Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        # the winner is the weight vector closest to the sample
        return 0 if d0 <= d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move the winning weight vector toward the training sample."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
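# NOTE: a worked single step of the update rule above, w <- w + alpha * (x - w).
# With alpha = 0.5, a weight component of 0.2 and an input of 1 moves to
# 0.2 + 0.5 * (1 - 0.2) = 0.6, i.e. halfway toward the sample.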
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
| 63 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCAmelCase ( a_: Tuple ) -> List[str]:
_UpperCAmelCase : int = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
_UpperCAmelCase : Any = True if '''large''' in model_name or '''huge''' in model_name else False
_UpperCAmelCase : Dict = True if '''large''' in model_name or '''huge''' in model_name else False
_UpperCAmelCase : Tuple = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
_UpperCAmelCase : Any = [3, 3, 3, 3]
_UpperCAmelCase : Any = [5, 5, 5, 5]
elif "fl4" in model_name:
_UpperCAmelCase : Optional[int] = [4, 4, 4, 4]
_UpperCAmelCase : Dict = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
_UpperCAmelCase : Tuple = [3, 3, 3, 3]
if "lrf" in model_name:
_UpperCAmelCase : Dict = [3, 3, 3, 3]
else:
_UpperCAmelCase : List[Any] = [2, 2, 2, 2]
if "tiny" in model_name:
_UpperCAmelCase : Any = 96
elif "small" in model_name:
_UpperCAmelCase : Optional[int] = 96
elif "base" in model_name:
_UpperCAmelCase : Optional[Any] = 128
elif "large" in model_name:
_UpperCAmelCase : List[str] = 192
elif "xlarge" in model_name:
_UpperCAmelCase : str = 256
elif "huge" in model_name:
_UpperCAmelCase : Dict = 352
# set label information
_UpperCAmelCase : Tuple = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
_UpperCAmelCase : Any = '''imagenet-22k-id2label.json'''
else:
_UpperCAmelCase : Dict = '''imagenet-1k-id2label.json'''
_UpperCAmelCase : Dict = json.load(open(hf_hub_download(__UpperCAmelCase, __UpperCAmelCase, repo_type="dataset" ), "r" ) )
    _UpperCAmelCase : int = {int(k ): v for k, v in id2label.items()}
    _UpperCAmelCase : Union[str, Any] = {v: k for k, v in id2label.items()}
    _UpperCAmelCase : Any = FocalNetConfig(
        embed_dim=__UpperCAmelCase, depths=__UpperCAmelCase, focal_levels=__UpperCAmelCase, focal_windows=__UpperCAmelCase, use_conv_embed=__UpperCAmelCase, id2label=__UpperCAmelCase, label2id=__UpperCAmelCase, use_post_layernorm=__UpperCAmelCase, use_layerscale=__UpperCAmelCase, )
return config
def __UpperCAmelCase ( a_: List[Any] ) -> Optional[Any]:
if "patch_embed.proj" in name:
_UpperCAmelCase : Dict = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
_UpperCAmelCase : Any = name.replace("patch_embed.norm", "embeddings.norm" )
if "layers" in name:
_UpperCAmelCase : Union[str, Any] = '''encoder.''' + name
if "encoder.layers" in name:
_UpperCAmelCase : List[Any] = name.replace("encoder.layers", "encoder.stages" )
if "downsample.proj" in name:
_UpperCAmelCase : List[str] = name.replace("downsample.proj", "downsample.projection" )
if "blocks" in name:
_UpperCAmelCase : Dict = name.replace("blocks", "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
_UpperCAmelCase : Any = name.replace("modulation.f", "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
_UpperCAmelCase : Any = name.replace("modulation.h", "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
_UpperCAmelCase : Optional[Any] = name.replace("modulation.proj", "modulation.projection_out" )
if name == "norm.weight":
_UpperCAmelCase : Dict = '''layernorm.weight'''
if name == "norm.bias":
_UpperCAmelCase : str = '''layernorm.bias'''
if "head" in name:
_UpperCAmelCase : Dict = name.replace("head", "classifier" )
else:
_UpperCAmelCase : int = '''focalnet.''' + name
return name
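# NOTE: an illustrative trace (hypothetical key) of the renaming rules above,
# assuming each matched rule rebinds `name` before the next check runs:
#   "layers.0.blocks.1.modulation.f.weight"
#     -> "encoder.layers.0.blocks.1.modulation.f.weight"   ("layers" rule)
#     -> "encoder.stages.0.blocks.1.modulation.f.weight"   ("encoder.layers" rule)
#     -> "encoder.stages.0.layers.1.modulation.f.weight"   ("blocks" rule)
#     -> "encoder.stages.0.layers.1.modulation.projection_in.weight"
#     -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"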
def __UpperCAmelCase ( a_: Tuple, a_: Dict, a_: Any=False ) -> Optional[int]:
# fmt: off
_UpperCAmelCase : Any = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
_UpperCAmelCase : Union[str, Any] = model_name_to_url[model_name]
print("Checkpoint URL: ", __UpperCAmelCase )
_UpperCAmelCase : Tuple = torch.hub.load_state_dict_from_url(__UpperCAmelCase, map_location="cpu" )['''model''']
# rename keys
for key in state_dict.copy().keys():
_UpperCAmelCase : Optional[Any] = state_dict.pop(__UpperCAmelCase )
_UpperCAmelCase : str = val
_UpperCAmelCase : Optional[int] = get_focalnet_config(__UpperCAmelCase )
_UpperCAmelCase : Union[str, Any] = FocalNetForImageClassification(__UpperCAmelCase )
model.eval()
# load state dict
model.load_state_dict(__UpperCAmelCase )
# verify conversion
_UpperCAmelCase : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    _UpperCAmelCase : Optional[Any] = BitImageProcessor(
        do_resize=__UpperCAmelCase, size={"shortest_edge": 256}, resample=PILImageResampling.BILINEAR, do_center_crop=__UpperCAmelCase, crop_size=224, do_normalize=__UpperCAmelCase, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, )
_UpperCAmelCase : List[str] = Image.open(requests.get(__UpperCAmelCase, stream=__UpperCAmelCase ).raw )
_UpperCAmelCase : Optional[int] = processor(images=__UpperCAmelCase, return_tensors="pt" )
_UpperCAmelCase : Optional[int] = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06], std=[0.2_29, 0.2_24, 0.2_25] ),
] )
_UpperCAmelCase : str = image_transforms(__UpperCAmelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values, __UpperCAmelCase, atol=1e-4 )
_UpperCAmelCase : str = model(**__UpperCAmelCase )
_UpperCAmelCase : Union[str, Any] = outputs.logits.argmax(-1 ).item()
print("Predicted class:", model.config.idalabel[predicted_class_idx] )
print("First values of logits:", outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
_UpperCAmelCase : Union[str, Any] = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
elif model_name == "focalnet-tiny-lrf":
_UpperCAmelCase : Union[str, Any] = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
elif model_name == "focalnet-small":
_UpperCAmelCase : List[Any] = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
elif model_name == "focalnet-small-lrf":
_UpperCAmelCase : List[Any] = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
elif model_name == "focalnet-base":
_UpperCAmelCase : Union[str, Any] = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
elif model_name == "focalnet-base-lrf":
_UpperCAmelCase : Any = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
assert torch.allclose(outputs.logits[0, :3], __UpperCAmelCase, atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
__a = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 352 |
'''simple docstring'''
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_params(module: torch.nn.Module) -> None:
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device
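# NOTE: a minimal, illustrative usage sketch of the two helpers above --
# freeze a module's parameters before moving it to the best available device.
# The layer is a placeholder.
_example_layer = torch.nn.Linear(4, 4)
freeze_params(_example_layer)
_example_layer.to(get_device())
assert all(not p.requires_grad for p in _example_layer.parameters())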
def show_image(image) -> None:
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
| 17 | 0 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter (logging.LoggerAdapter ):
'''simple docstring'''
@staticmethod
def __A ( A__ ):
A__ : Union[str, Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __A ( self , A__ , A__ , *A__ , **A__ ):
if PartialState._shared_state == {}:
raise RuntimeError(
"""You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
A__ : Dict = kwargs.pop("""main_process_only""" , A__ )
A__ : Optional[int] = kwargs.pop("""in_order""" , A__ )
if self.isEnabledFor(A__ ):
if self._should_log(A__ ):
A__ , A__ : Dict = self.process(A__ , A__ )
self.logger.log(A__ , A__ , *A__ , **A__ )
elif in_order:
A__ : Optional[Any] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
A__ , A__ : str = self.process(A__ , A__ )
self.logger.log(A__ , A__ , *A__ , **A__ )
state.wait_for_everyone()
def get_logger (name: str , log_level: str = None ) -> MultiProcessAdapter:
    if log_level is None:
        log_level = os.environ.get("""ACCELERATE_LOG_LEVEL""" , log_level )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
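# NOTE: typical usage of the adapter returned above inside an Accelerate
# script -- the state must be initialised (e.g. by `Accelerator()`) before
# the first log call. The messages are illustrative.
from accelerate import Accelerator

logger = get_logger(__name__, log_level="INFO")
accelerator = Accelerator()
logger.info("printed once, on the main process only")
logger.info("printed by every process", main_process_only=False)
logger.debug("printed by each rank, in order", main_process_only=False, in_order=True)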
| 192 |
import argparse
from collections import defaultdict
import yaml
A_ : List[str] = 'docs/source/en/_toctree.yml'
def clean_doc_toc (doc_list ) -> List[str]:
A__ : Dict = defaultdict(lowercase_ )
A__ : Optional[int] = []
A__ : Union[str, Any] = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(lowercase_ )
A__ : Optional[int] = new_doc_list
A__ : Optional[int] = [key for key, value in counts.items() if value > 1]
A__ : Optional[Any] = []
for duplicate_key in duplicates:
A__ : List[Any] = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(lowercase_ ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
A__ : Dict = sorted(lowercase_ , key=lambda lowercase_ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(lowercase_ ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(lowercase_ )
# Sort
return overview_doc
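# NOTE: an illustrative before/after (hypothetical entries) for the function
# above -- duplicate locals with the same title collapse to one entry, the
# remainder is title-sorted, and any "Overview" entry is always placed first:
#   [{"local": "b", "title": "DDIM"}, {"local": "a", "title": "Overview"},
#    {"local": "b", "title": "DDIM"}]
#   -> [{"local": "a", "title": "Overview"}, {"local": "b", "title": "DDIM"}]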
def check_scheduler_doc (overwrite=False ) -> List[Any]:
with open(lowercase_ , encoding="""utf-8""" ) as f:
A__ : Dict = yaml.safe_load(f.read() )
# Get to the API doc
A__ : List[Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
A__ : Union[str, Any] = content[api_idx]["""sections"""]
# Then to the model doc
A__ : Dict = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
A__ : List[Any] = api_doc[scheduler_idx]["""sections"""]
A__ : Union[str, Any] = clean_doc_toc(lowercase_ )
A__ : Optional[int] = False
if new_scheduler_doc != scheduler_doc:
A__ : List[Any] = True
if overwrite:
A__ : Optional[int] = new_scheduler_doc
if diff:
if overwrite:
A__ : Tuple = api_doc
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(lowercase_ , allow_unicode=lowercase_ ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def check_pipeline_doc (overwrite=False ) -> Optional[Any]:
with open(lowercase_ , encoding="""utf-8""" ) as f:
A__ : int = yaml.safe_load(f.read() )
# Get to the API doc
A__ : Optional[Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
A__ : List[str] = content[api_idx]["""sections"""]
# Then to the model doc
A__ : List[Any] = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
A__ : Dict = False
A__ : Tuple = api_doc[pipeline_idx]["""sections"""]
A__ : Tuple = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
A__ : List[Any] = pipeline_doc["""section"""]
A__ : Dict = clean_doc_toc(lowercase_ )
if overwrite:
A__ : Optional[Any] = new_sub_pipeline_doc
new_pipeline_docs.append(lowercase_ )
# sort overall pipeline doc
A__ : Optional[int] = clean_doc_toc(lowercase_ )
if new_pipeline_docs != pipeline_docs:
A__ : int = True
if overwrite:
A__ : List[Any] = new_pipeline_docs
if diff:
if overwrite:
A__ : Union[str, Any] = api_doc
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(lowercase_ , allow_unicode=lowercase_ ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
A_ : str = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 192 | 1 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
@register_to_config
def __init__( self : Optional[int] , *,
UpperCAmelCase_ : int = 4 , UpperCAmelCase_ : int = 768 , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , ):
super().__init__()
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.zeros(UpperCAmelCase_ ) )
# parameters for additional clip time embeddings
SCREAMING_SNAKE_CASE : Any = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ )
# parameters for encoder hidden states
SCREAMING_SNAKE_CASE : Optional[Any] = clip_extra_context_tokens
SCREAMING_SNAKE_CASE : str = nn.Linear(
UpperCAmelCase_ , self.clip_extra_context_tokens * cross_attention_dim )
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = nn.LayerNorm(UpperCAmelCase_ )
def _A ( self : Dict , *, UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] ):
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
SCREAMING_SNAKE_CASE : str = image_embeddings.shape[0]
SCREAMING_SNAKE_CASE : Any = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_free_guidance_embeddings.expand(
UpperCAmelCase_ , -1 )
SCREAMING_SNAKE_CASE : int = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
SCREAMING_SNAKE_CASE : Optional[Any] = self.embedding_proj(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.clip_image_embeddings_project_to_time_embeddings(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
SCREAMING_SNAKE_CASE : Union[str, Any] = self.clip_extra_context_tokens_proj(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = clip_extra_context_tokens.reshape(UpperCAmelCase_ , -1 , self.clip_extra_context_tokens )
SCREAMING_SNAKE_CASE : List[str] = clip_extra_context_tokens.permute(0 , 2 , 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.encoder_hidden_states_proj(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = self.text_encoder_hidden_states_norm(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
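# NOTE: an illustrative shape walk-through of the forward pass above, under
# the assumption batch = 2, clip_extra_context_tokens = 4 and
# cross_attention_dim = 768 (the constructor arguments are obscured above):
#   image_embeddings (2, 768) are projected and added to the projected prompt
#   embeds to form the additive CLIP time embeddings;
#   the extra-token projection maps (2, 768) -> (2, 4 * 768), which is
#   reshaped to (2, 768, 4) and permuted to (2, 4, 768) before being
#   concatenated with the projected text encoder hidden states along dim 1.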
| 362 |
def generate_large_matrix() -> list[list[int]]:
    """Build a 1000 x 1000 grid sorted in decreasing order along rows and columns."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the smallest index of a negative number in a decreasingly sorted array."""
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives via binary search; the search window shrinks row by row."""
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every value in the grid."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, breaking at the first negative in each row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting implementations on the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
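# NOTE: a quick hand-checked instance of the binary-search variant above: in
# the first test grid the row-wise first-negative indices (within the
# shrinking window) are 3, 3, 2, 0, so total = 8 non-negatives and
# 4 * 4 - 8 = 8 negatives.
assert count_negatives_binary_search(
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
) == 8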
| 319 | 0 |
import sys
from collections import defaultdict
class Heap :
def __init__( self : str ):
SCREAMING_SNAKE_CASE__ = []
def A_ ( self : Tuple , UpperCAmelCase_ : List[str] ):
return self.node_position[vertex]
def A_ ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = pos
def A_ ( self : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
SCREAMING_SNAKE_CASE__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
SCREAMING_SNAKE_CASE__ = 2 * start + 1
else:
SCREAMING_SNAKE_CASE__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = heap[smallest_child], positions[smallest_child]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = (
heap[start],
positions[start],
)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = temp, tempa
SCREAMING_SNAKE_CASE__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , UpperCAmelCase_ )
self.top_to_bottom(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE__ = position[index]
while index != 0:
SCREAMING_SNAKE_CASE__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
SCREAMING_SNAKE_CASE__ = heap[parent]
SCREAMING_SNAKE_CASE__ = position[parent]
self.set_position(position[parent] , UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE__ = val
SCREAMING_SNAKE_CASE__ = temp
self.set_position(UpperCAmelCase_ , UpperCAmelCase_ )
break
SCREAMING_SNAKE_CASE__ = parent
else:
SCREAMING_SNAKE_CASE__ = val
SCREAMING_SNAKE_CASE__ = temp
self.set_position(UpperCAmelCase_ , 0 )
def A_ ( self : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE__ = len(UpperCAmelCase_ ) // 2 - 1
for i in range(UpperCAmelCase_ , -1 , -1 ):
self.top_to_bottom(UpperCAmelCase_ , UpperCAmelCase_ , len(UpperCAmelCase_ ) , UpperCAmelCase_ )
def A_ ( self : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE__ = positions[0]
SCREAMING_SNAKE_CASE__ = sys.maxsize
self.top_to_bottom(UpperCAmelCase_ , 0 , len(UpperCAmelCase_ ) , UpperCAmelCase_ )
return temp
def prisms_algorithm ( UpperCamelCase_ ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = Heap()
SCREAMING_SNAKE_CASE__ = [0] * len(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = [-1] * len(UpperCamelCase_ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
SCREAMING_SNAKE_CASE__ = [] # Heap of Distance of vertices from their neighboring vertex
SCREAMING_SNAKE_CASE__ = []
for vertex in range(len(UpperCamelCase_ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase_ )
heap.node_position.append(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = distance
heap.heapify(UpperCamelCase_ , UpperCamelCase_ )
for _ in range(1 , len(UpperCamelCase_ ) ):
SCREAMING_SNAKE_CASE__ = heap.delete_minimum(UpperCamelCase_ , UpperCamelCase_ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
SCREAMING_SNAKE_CASE__ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase_ )]
):
SCREAMING_SNAKE_CASE__ = distance
heap.bottom_to_top(
UpperCamelCase_ , heap.get_position(UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__snake_case = int(input("""Enter number of edges: """).strip())
__snake_case = defaultdict(list)
for _ in range(edges_number):
__snake_case = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
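# NOTE: a compact reference sketch of the same algorithm using the standard
# library heap instead of the hand-rolled Heap class above. It assumes the
# adjacency shape built above: vertex -> list of [neighbor, distance] pairs.
import heapq


def prims_algorithm_heapq(adjacency, start=0):
    visited = {start}
    # lazy Prim: keep all frontier edges in a heap, keyed by weight
    candidate_edges = [(distance, start, neighbor) for neighbor, distance in adjacency[start]]
    heapq.heapify(candidate_edges)
    tree_edges = []
    while candidate_edges:
        distance, u, v = heapq.heappop(candidate_edges)
        if v in visited:
            continue
        visited.add(v)
        tree_edges.append((u, v))
        for neighbor, dist in adjacency[v]:
            if neighbor not in visited:
                heapq.heappush(candidate_edges, (dist, v, neighbor))
    return tree_edges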
| 176 |
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Project Euler 99: return the 1-based line number of the base,exponent
    pair with the greatest value of base**exponent, compared via
    exponent * log10(base) instead of evaluating the huge powers.
    The data file is assumed to sit next to this script."""
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
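# NOTE: a hand-checked instance of the comparison trick used above: rather
# than evaluating the huge powers directly, compare x * log10(base).
# For 2**11 vs 3**7: 11 * log10(2) ~ 3.311 < 7 * log10(3) ~ 3.340, so
# 3**7 = 2187 beats 2**11 = 2048, matching the direct computation.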
| 176 | 1 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory() -> None:
    raise RuntimeError("CUDA out of memory." )
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class a__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Tuple = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(_UpperCamelCase ):
nonlocal batch_sizes
batch_sizes.append(_UpperCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(_UpperCamelCase , [128, 64, 32, 16, 8] )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[int] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(_UpperCamelCase , _UpperCamelCase ):
nonlocal batch_sizes
batch_sizes.append(_UpperCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
_lowercase , _lowercase : List[str] = mock_training_loop_function("hello" )
self.assertListEqual(_UpperCamelCase , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, "hello"] )
def _lowerCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(_UpperCamelCase ):
pass
with self.assertRaises(_UpperCamelCase ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
def _lowerCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(_UpperCamelCase ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(_UpperCamelCase ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
def _lowerCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(_UpperCamelCase ) as cm:
mock_training_loop_function(128 , "hello" , "world" )
self.assertIn("Batch size was passed into `f`" , cm.exception.args[0] )
self.assertIn("`f(arg1='hello', arg2='world')" , cm.exception.args[0] )
def _lowerCamelCase ( self ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(_UpperCamelCase ):
raise ValueError("Oops, we had an error!" )
with self.assertRaises(_UpperCamelCase ) as cm:
mock_training_loop_function()
self.assertIn("Oops, we had an error!" , cm.exception.args[0] )
@require_cuda
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[int] = torch.cuda.memory_allocated()
_lowercase : List[Any] = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , _UpperCamelCase )
_lowercase : Optional[Any] = release_memory(_UpperCamelCase )
self.assertEqual(torch.cuda.memory_allocated() , _UpperCamelCase )
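# NOTE: typical real-world usage of the decorator exercised by the tests
# above, sketched for a training loop (the function body is a placeholder):
from accelerate.utils.memory import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def _train_sketch(batch_size):
    # runs with 128 first; on a CUDA OOM the batch size is halved and retried
    print(f"training with batch_size={batch_size}")


_train_sketch()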
| 199 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a__ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
    _SCREAMING_SNAKE_CASE : Dict = StableUnCLIPImg2ImgPipeline
_SCREAMING_SNAKE_CASE : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_SCREAMING_SNAKE_CASE : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_SCREAMING_SNAKE_CASE : List[Any] = frozenset([] )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[Any] = 32
_lowercase : Any = embedder_hidden_size
# image encoding components
_lowercase : Optional[int] = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
_lowercase : Dict = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=_UpperCamelCase , projection_dim=_UpperCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
_lowercase : str = StableUnCLIPImageNormalizer(embedding_dim=_UpperCamelCase )
_lowercase : Dict = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
_lowercase : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
_lowercase : Optional[Any] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_UpperCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
        _lowercase : List[str] = UNet2DConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_UpperCamelCase , layers_per_block=1 , upcast_attention=_UpperCamelCase , use_linear_projection=_UpperCamelCase , )
torch.manual_seed(0 )
_lowercase : int = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type="v_prediction" , set_alpha_to_one=_UpperCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
_lowercase : Dict = AutoencoderKL()
_lowercase : int = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase=0 , _UpperCamelCase=True ):
"""simple docstring"""
if str(_UpperCamelCase ).startswith("mps" ):
_lowercase : List[Any] = torch.manual_seed(_UpperCamelCase )
else:
_lowercase : List[str] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
_lowercase : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
if pil_image:
_lowercase : Optional[Any] = input_image * 0.5 + 0.5
_lowercase : Optional[int] = input_image.clamp(0 , 1 )
_lowercase : str = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_lowercase : int = DiffusionPipeline.numpy_to_pil(_UpperCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowercase : List[str] = self.get_dummy_components()
        _lowercase : str = StableUnCLIPImg2ImgPipeline(**_UpperCamelCase )
_lowercase : int = sd_pipe.to(_UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )
_lowercase : str = self.get_dummy_inputs(_UpperCamelCase )
inputs.update({"image_embeds": None} )
_lowercase : Tuple = sd_pipe(**_UpperCamelCase ).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase : Tuple = np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[Any] = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Dict = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=_UpperCamelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_UpperCamelCase )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
_lowercase : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
        _lowercase : List[str] = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.float16 )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_lowercase : int = torch.Generator(device="cpu" ).manual_seed(0 )
_lowercase : List[Any] = pipe(_UpperCamelCase , "anime turle" , generator=_UpperCamelCase , output_type="np" )
_lowercase : str = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
_lowercase : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
        _lowercase : Any = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.float16 )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_lowercase : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
_lowercase : Any = pipe(_UpperCamelCase , "anime turle" , generator=_UpperCamelCase , output_type="np" )
_lowercase : Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        _lowercase : Optional[Any] = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.float16 )
_lowercase : Tuple = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_lowercase : Optional[int] = pipe(
_UpperCamelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
_lowercase : str = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
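# NOTE: a minimal end-user sketch of the pipeline exercised by the tests
# above; the checkpoint and image URL are the ones the tests use, and the
# fp16 + offload settings mirror the tests' memory-saving path.
from diffusers.utils import load_image

_pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
)
_pipe.enable_sequential_cpu_offload()
_init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
)
_image = _pipe(_init_image, "anime turtle").images[0]
_image.save("anime_turtle.png")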
| 199 | 1 |
import unittest

from knapsack import knapsack as k


class TestKnapsack(unittest.TestCase):
    def test_base_case(self) -> None:
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self) -> None:
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self) -> None:
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main() | 262 |
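The `knapsack` module exercised above is not reproduced in this row. As a hedged sketch (not the vetted module), a standard recursive 0/1 knapsack with a `(capacity, weights, values, counter)` signature — the argument order inferred from the calls above — satisfies every assertion in these tests:

def knapsack(capacity, weights, values, counter):
    # Base case: no items left to consider, or no capacity left.
    if counter == 0 or capacity == 0:
        return 0
    # If the current item is too heavy, it can only be skipped.
    if weights[counter - 1] > capacity:
        return knapsack(capacity, weights, values, counter - 1)
    # Otherwise take the better of including or excluding the item.
    return max(
        values[counter - 1] + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack(capacity, weights, values, counter - 1),
    )

assert knapsack(3, [3, 2, 1], [1, 2, 3], 3) == 5
assert knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220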
from jiwer import compute_measures
import datasets
lowerCAmelCase : Tuple = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
lowerCAmelCase : List[Any] = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
lowerCAmelCase : Dict = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _A ( datasets.Metric):
def UpperCAmelCase ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] , )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
if concatenate_texts:
return compute_measures(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )["wer"]
else:
SCREAMING_SNAKE_CASE_ : str = 0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
for prediction, reference in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : str = compute_measures(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 253 | 0 |
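As a cross-check on the iterative branch above without depending on jiwer, a self-contained word-level Levenshtein computation reproduces the 0.5 from the docstring example (a sketch, not the metric's actual implementation):

def word_edit_distance(reference, prediction):
    # Dynamic-programming edit distance over words, plus the reference length N.
    ref, hyp = reference.split(), prediction.split()
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i
    for j in range(len(hyp) + 1):
        dp[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost)
    return dp[-1][-1], len(ref)

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]
incorrect = total = 0
for prediction, reference in zip(predictions, references):
    errors, n_ref = word_edit_distance(reference, prediction)
    incorrect += errors
    total += n_ref
assert incorrect / total == 0.5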
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowercase :
_a = LEDConfig
_a = {}
_a = "gelu"
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , attention_window=4 , ) -> Any:
_A : List[Any] = parent
_A : Tuple = batch_size
_A : List[Any] = seq_length
_A : List[Any] = is_training
_A : str = use_labels
_A : List[str] = vocab_size
_A : Union[str, Any] = hidden_size
_A : List[str] = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : Optional[Any] = intermediate_size
_A : str = hidden_dropout_prob
_A : Any = attention_probs_dropout_prob
_A : Optional[Any] = max_position_embeddings
_A : List[Any] = eos_token_id
_A : str = pad_token_id
_A : Tuple = bos_token_id
_A : List[Any] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_A : Optional[Any] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_A : Optional[int] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]] , axis=-1 , )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
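# Quick numeric check (added for illustration, not part of the original test file) of
# the `encoder_seq_length` padding formula in TFLEDModelTester above: inputs are
# padded up to the next multiple of the attention window.
_seq_len, _window = 7, 4
assert _seq_len + (_window - _seq_len % _window) % _window == 8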
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class lowercase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_a = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_a = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_a = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_a = True
_a = False
_a = False
_a = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states , False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states , False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states , False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs))
            self.assertEqual(model.config.output_hidden_states , True)
            check_encoder_attentions_output(outputs)
@unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking is not yet implemented for TF LED
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
_snake_case = 1e-4
@slow
@require_tf
class lowercase ( unittest.TestCase ):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape , expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape , expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-3 , rtol=1e-3)
| 354 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class lowercase ( PretrainedConfig ):
_a = "audio-spectrogram-transformer"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=1024 , num_mel_bins=128 , **kwargs , ) -> List[Any]:
        super().__init__(**kwargs )
_A : Any = hidden_size
_A : Tuple = num_hidden_layers
_A : List[str] = num_attention_heads
_A : Any = intermediate_size
_A : Optional[Any] = hidden_act
_A : Optional[Any] = hidden_dropout_prob
_A : Any = attention_probs_dropout_prob
_A : Optional[Any] = initializer_range
_A : Optional[Any] = layer_norm_eps
_A : str = patch_size
_A : Tuple = qkv_bias
_A : Dict = frequency_stride
_A : Union[str, Any] = time_stride
_A : Any = max_length
_A : Tuple = num_mel_bins
| 343 | 0 |
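Assuming AST builds its spectrogram patch grid with the usual overlapping-stride formula (an assumption for illustration; the config above only stores the strides and sizes), the defaults work out to 1212 patches:

frequency_out = (128 - 16) // 10 + 1  # 12 patches along the mel-bin axis
time_out = (1024 - 16) // 10 + 1      # 101 patches along the time axis
assert frequency_out * time_out == 1212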
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """
    Creates a state space tree and walks it depth-first: at each index the
    element is first skipped, then included, terminating at the end of the sequence.
    """
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
| 142 |
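A quick cross-check of the backtracking above: enumerating bitmasks visits the same 2**n subsequences, so for the four-element input the recursion prints 16 lists.

seq = [3, 1, 2, 4]
subsequences = [[seq[i] for i in range(len(seq)) if mask >> i & 1] for mask in range(1 << len(seq))]
assert len(subsequences) == 2 ** len(seq)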
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(conv_kernel_sizes) , conv_channels=args.conv_channels , conv_kernel_sizes=conv_kernel_sizes , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=tie_embeds , num_beams=5 , max_length=200 , use_cache=True , decoder_start_token_id=2 , early_stopping=True , )
    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}" )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
_A : str = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 142 | 1 |
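`make_linear_from_emb` above ties the LM head to the decoder's input embeddings by swapping in the embedding's storage; a minimal standalone illustration with made-up sizes:

from torch import nn

emb = nn.Embedding(10, 4)                # hypothetical: vocab 10, hidden 4
lm_head = nn.Linear(10, 4, bias=False)   # declared dims mirror the script above
lm_head.weight.data = emb.weight.data    # .data swap: both modules now share one tensor
assert lm_head.weight.data_ptr() == emb.weight.data_ptr()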
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
UpperCAmelCase__ = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and evaluation.
    """

    train_file: Optional[str] = field(default=None , metadata={'help': 'The input training data file (a text file).'} )
    validation_file: Optional[str] = field(
        default=None , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. If passed, sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            'help': (
                'Whether to pad all samples to the maximum sentence length. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
                'efficient on GPU but very bad for TPU.'
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that dynamically pads a batch of multiple-choice inputs.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]['input_ids'])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1) for k, v in batch.items()}
        # Add back labels
        batch['labels'] = torch.tensor(labels , dtype=torch.int64)
        return batch
def _a ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a , a , a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a , a , a = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , a__ , a__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
a = training_args.get_process_log_level()
logger.setLevel(a__ )
datasets.utils.logging.set_verbosity(a__ )
transformers.utils.logging.set_verbosity(a__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
a = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
a = {}
if data_args.train_file is not None:
a = data_args.train_file
if data_args.validation_file is not None:
a = data_args.validation_file
a = data_args.train_file.split('''.''' )[-1]
a = load_dataset(
a__ , data_files=a__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
a = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
a = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
a = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=a__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
a = [F"""ending{i}""" for i in range(4 )]
a = '''sent1'''
a = '''sent2'''
if data_args.max_seq_length is None:
a = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
a = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
a = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding='max_length' if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v) , 4)] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
a = raw_datasets['''train''']
if data_args.max_train_samples is not None:
a = min(len(a__ ) , data_args.max_train_samples )
a = train_dataset.select(range(a__ ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
a = train_dataset.map(
a__ , batched=a__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
a = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
a = min(len(a__ ) , data_args.max_eval_samples )
a = eval_dataset.select(range(a__ ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
a = eval_dataset.map(
a__ , batched=a__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
a = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=a__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
a = Trainer(
model=a__ , args=a__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=a__ , data_collator=a__ , compute_metrics=a__ , )
# Training
if training_args.do_train:
a = None
if training_args.resume_from_checkpoint is not None:
a = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
a = last_checkpoint
a = trainer.train(resume_from_checkpoint=a__ )
trainer.save_model() # Saves the tokenizer too for easy upload
a = train_result.metrics
a = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(a__ )
)
a = min(a__ , len(a__ ) )
trainer.log_metrics('''train''' , a__ )
trainer.save_metrics('''train''' , a__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
a = trainer.evaluate()
a = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a__ )
a = min(a__ , len(a__ ) )
trainer.log_metrics('''eval''' , a__ )
trainer.save_metrics('''eval''' , a__ )
a = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**a__ )
else:
trainer.create_model_card(**a__ )
def _a ( a :Tuple ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 359 |
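Both `preprocess_function` and the collator above rely on the same flatten-then-regroup trick for multiple choice: the 4 choices per example are tokenized as one flat batch and then reshaped back. A toy round trip with illustrative data:

features = [[f"ex{e}-choice{c}" for c in range(4)] for e in range(2)]
flattened = [choice for example in features for choice in example]  # what chain(*...) does
regrouped = [flattened[i : i + 4] for i in range(0, len(flattened), 4)]
assert regrouped == features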
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
a = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
a = {
layer_id: [key for key in vae_state_dict if F"""down.{layer_id}""" in key] for layer_id in range(a )
}
# Retrieves the keys for the decoder up blocks only
a = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
a = {
layer_id: [key for key in vae_state_dict if F"""up.{layer_id}""" in key] for layer_id in range(a )
}
for i in range(a ):
a = [key for key in down_blocks[i] if F"""down.{i}""" in key and F"""down.{i}.downsample""" not in key]
if F"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
a = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.weight""" )
a = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.bias""" )
a = renew_vae_resnet_paths(a )
a = {'''old''': F"""down.{i}.block""", '''new''': F"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
a = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
a = 2
for i in range(1 , num_mid_res_blocks + 1 ):
a = [key for key in mid_resnets if F"""encoder.mid.block_{i}""" in key]
a = renew_vae_resnet_paths(a )
a = {'''old''': F"""mid.block_{i}""", '''new''': F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
a = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
a = renew_vae_attention_paths(a )
a = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
conv_attn_to_linear(a )
for i in range(a ):
a = num_up_blocks - 1 - i
a = [
key for key in up_blocks[block_id] if F"""up.{block_id}""" in key and F"""up.{block_id}.upsample""" not in key
]
if F"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
a = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.weight"""
]
a = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.bias"""
]
a = renew_vae_resnet_paths(a )
a = {'''old''': F"""up.{block_id}.block""", '''new''': F"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
a = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
a = 2
for i in range(1 , num_mid_res_blocks + 1 ):
a = [key for key in mid_resnets if F"""decoder.mid.block_{i}""" in key]
a = renew_vae_resnet_paths(a )
a = {'''old''': F"""mid.block_{i}""", '''new''': F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
a = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
a = renew_vae_attention_paths(a )
a = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
conv_attn_to_linear(a )
return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
UpperCAmelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 26 | 0 |
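The block-counting comprehensions above work by truncating each key to its first three dot-separated components; a toy check with made-up keys:

vae_state_dict = {
    "encoder.down.0.block.0.conv1.weight": None,
    "encoder.down.0.block.1.conv1.weight": None,
    "encoder.down.1.block.0.conv1.weight": None,
}
num_down_blocks = len({".".join(k.split(".")[:3]) for k in vae_state_dict if "encoder.down" in k})
assert num_down_blocks == 2
down_blocks = {i: [k for k in vae_state_dict if f"down.{i}" in k] for i in range(num_down_blocks)}
assert len(down_blocks[0]) == 2 and len(down_blocks[1]) == 1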
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class A ( TestCase ):
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
# Realm tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
        config = RealmConfig(num_block_records=self.num_block_records)
return config
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
        dataset = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
        block_records = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
        ] , dtype=object , )
return block_records
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
        retriever = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Dict = self.get_config()
A : Tuple = self.get_dummy_retriever()
A : Optional[Any] = retriever.tokenizer
A : Dict = np.array([0, 3] , dtype='''long''' )
A : int = tokenizer(['''Test question'''] ).input_ids
A : Dict = tokenizer(
['''the fourth'''] , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , ).input_ids
A : Tuple = config.reader_seq_len
A : int = retriever(
snake_case_ , snake_case_ , answer_ids=snake_case_ , max_length=snake_case_ , return_tensors='''np''' )
self.assertEqual(len(snake_case_ ) , 2 )
self.assertEqual(len(snake_case_ ) , 2 )
self.assertEqual(len(snake_case_ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : str = self.get_config()
A : int = self.get_dummy_retriever()
A : Dict = retriever.tokenizer
A : int = np.array([0, 3, 5] , dtype='''long''' )
A : Union[str, Any] = tokenizer(['''Test question'''] ).input_ids
A : str = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , ).input_ids
A : Any = config.reader_seq_len
A : str = retriever(
snake_case_ , snake_case_ , answer_ids=snake_case_ , max_length=snake_case_ , return_tensors='''np''' )
self.assertEqual([False, True, True] , snake_case_ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , snake_case_ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")
        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 3 |
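At its core, the retriever's block lookup exercised above is fancy indexing into a NumPy object array of byte strings (hypothetical records for illustration):

import numpy as np

block_records = np.array([b"first record", b"second record", b"third record"], dtype=object)
retrieved_block_ids = np.array([0, 2], dtype=np.int64)
assert list(block_records[retrieved_block_ids]) == [b"first record", b"third record"]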
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
lowerCamelCase_ : List[str] = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]
def UpperCAmelCase__ ( ):
"""simple docstring"""
A_ : Union[str, Any] = os.path.dirname(os.path.realpath(_UpperCAmelCase ) )
A_ : Tuple = os.path.join(_UpperCAmelCase , 'words.txt' )
A_ : List[Any] = ''
with open(_UpperCAmelCase ) as f:
A_ : int = f.readline()
A_ : Optional[Any] = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
A_ : Dict = [
word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(_UpperCAmelCase )
if __name__ == "__main__":
print(solution()) | 286 | 0 |
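A worked instance of the word-value rule used above, taken from the Project Euler 42 statement: "SKY" scores 19 + 11 + 25 = 55, the 10th triangular number, so it counts as a triangular word.

value = sum(ord(letter) - 64 for letter in "SKY")
assert value == 55 and value in [n * (n + 1) // 2 for n in range(1, 101)]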
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : List[str] = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : str = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 24 |
"""simple docstring"""
def find_min(arr: list[int]) -> int:
    """
    Returns the minimum possible difference between the sums of a two-way
    partition of `arr` (the partition problem, solved via subset-sum DP).
    """
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    # A sum of 0 is always reachable (including with zero items).
    for i in range(n + 1):
        dp[i][0] = True
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # Either skip item i, or include it if it fits.
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
| 24 | 1 |
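A brute-force cross-check of what the DP above computes, enumerating all two-way partitions of a small input (illustrative only; exponential in len(arr)):

from itertools import combinations

arr = [1, 6, 11, 5]
total = sum(arr)
best = min(abs(total - 2 * sum(c)) for r in range(len(arr) + 1) for c in combinations(arr, r))
assert best == 1  # {1, 5, 6} vs {11}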
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCAmelCase = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = """"""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowerCAmelCase__ : Union[str, Any] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
lowerCAmelCase__ : Any = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ : Union[str, Any] = in_proj_weight[:256, :]
lowerCAmelCase__ : List[Any] = in_proj_bias[:256]
lowerCAmelCase__ : str = in_proj_weight[256:512, :]
lowerCAmelCase__ : Optional[int] = in_proj_bias[256:512]
lowerCAmelCase__ : Union[str, Any] = in_proj_weight[-256:, :]
lowerCAmelCase__ : str = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
lowerCAmelCase__ : List[str] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
lowerCAmelCase__ : Optional[Any] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ : Optional[int] = in_proj_weight[:256, :]
lowerCAmelCase__ : Dict = in_proj_bias[:256]
lowerCAmelCase__ : Any = in_proj_weight[256:512, :]
lowerCAmelCase__ : Dict = in_proj_bias[256:512]
lowerCAmelCase__ : Tuple = in_proj_weight[-256:, :]
lowerCAmelCase__ : Union[str, Any] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
lowerCAmelCase__ : List[Any] = state_dict.pop(
f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
lowerCAmelCase__ : List[str] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
lowerCAmelCase__ : str = in_proj_weight_cross_attn[:256, :]
lowerCAmelCase__ : List[Any] = in_proj_bias_cross_attn[:256]
lowerCAmelCase__ : Optional[Any] = in_proj_weight_cross_attn[256:512, :]
lowerCAmelCase__ : str = in_proj_bias_cross_attn[256:512]
lowerCAmelCase__ : Dict = in_proj_weight_cross_attn[-256:, :]
lowerCAmelCase__ : str = in_proj_bias_cross_attn[-256:]
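# The slicing above follows PyTorch's fused nn.MultiheadAttention layout: the
# query, key and value projections are stacked into a single (3*d_model, d_model)
# weight and a (3*d_model,) bias, with d_model = 256 for this DETR-style model.
# A minimal standalone sketch of the same split, assuming fused tensors of that
# shape (the helper name is hypothetical, not part of the original script):
def split_fused_qkv(in_proj_weight, in_proj_bias, d_model=256):
    """Split fused attention projections into (q, k, v) weight/bias pairs."""
    q_w = in_proj_weight[:d_model, :]
    k_w = in_proj_weight[d_model : 2 * d_model, :]
    v_w = in_proj_weight[-d_model:, :]
    q_b = in_proj_bias[:d_model]
    k_b = in_proj_bias[d_model : 2 * d_model]
    v_b = in_proj_bias[-d_model:]
    return (q_w, q_b), (k_w, k_b), (v_w, v_b)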
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : str = image.size
lowerCAmelCase__ : Tuple = max(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : str = 800 if """detection""" in checkpoint_url else 1000
lowerCAmelCase__ : Union[str, Any] = target_max_size / current_max_size
lowerCAmelCase__ : str = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
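# Note: the resize above scales the longer image side to 800 px for the detection
# checkpoint and 1000 px for the structure-recognition checkpoint, preserving the
# aspect ratio of the input image.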
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = F.to_tensor(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = F.normalize(UpperCamelCase , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
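# The mean/std values above are the standard ImageNet normalization statistics,
# matching what DETR-style image processors apply at inference time.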
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
logger.info("""Converting model...""" )
# load original state dict
lowerCAmelCase__ : str = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location="""cpu""" )
# rename keys
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Tuple = rename_backbone_keys(UpperCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowerCAmelCase__ : List[str] = """model."""
for key in state_dict.copy().keys():
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
lowerCAmelCase__ : Optional[Any] = state_dict.pop(UpperCamelCase )
lowerCAmelCase__ : int = val
# create HuggingFace model and load state dict
lowerCAmelCase__ : List[str] = TableTransformerConfig(
backbone="""resnet18""" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
lowerCAmelCase__ : int = 15
lowerCAmelCase__ : Tuple = 2
lowerCAmelCase__ : Optional[Any] = {0: """table""", 1: """table rotated"""}
lowerCAmelCase__ : Union[str, Any] = idalabel
lowerCAmelCase__ : Tuple = {v: k for k, v in idalabel.items()}
else:
lowerCAmelCase__ : Optional[Any] = 125
lowerCAmelCase__ : int = 6
lowerCAmelCase__ : Union[str, Any] = {
0: """table""",
1: """table column""",
2: """table row""",
3: """table column header""",
4: """table projected row header""",
5: """table spanning cell""",
}
lowerCAmelCase__ : Any = idalabel
lowerCAmelCase__ : Any = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ : List[Any] = DetrImageProcessor(
format="""coco_detection""" , max_size=800 if """detection""" in checkpoint_url else 1000 )
lowerCAmelCase__ : Optional[int] = TableTransformerForObjectDetection(UpperCamelCase )
model.load_state_dict(UpperCamelCase )
model.eval()
# verify our conversion
lowerCAmelCase__ : List[Any] = """example_pdf.png""" if """detection""" in checkpoint_url else """example_table.png"""
lowerCAmelCase__ : int = hf_hub_download(repo_id="""nielsr/example-pdf""" , repo_type="""dataset""" , filename=UpperCamelCase )
lowerCAmelCase__ : List[str] = Image.open(UpperCamelCase ).convert("""RGB""" )
lowerCAmelCase__ : Union[str, Any] = normalize(resize(UpperCamelCase , UpperCamelCase ) ).unsqueeze(0 )
lowerCAmelCase__ : Tuple = model(UpperCamelCase )
if "detection" in checkpoint_url:
lowerCAmelCase__ : List[str] = (1, 15, 3)
lowerCAmelCase__ : Optional[Any] = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
lowerCAmelCase__ : Optional[Any] = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
lowerCAmelCase__ : Any = (1, 125, 7)
lowerCAmelCase__ : str = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
lowerCAmelCase__ : List[str] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
model.save_pretrained(UpperCamelCase )
image_processor.save_pretrained(UpperCamelCase )
if push_to_hub:
# Push model to HF hub
logger.info("""Pushing model to the hub...""" )
lowerCAmelCase__ : Optional[Any] = (
"""microsoft/table-transformer-detection"""
if """detection""" in checkpoint_url
else """microsoft/table-transformer-structure-recognition"""
)
model.push_to_hub(UpperCamelCase )
image_processor.push_to_hub(UpperCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_lowerCAmelCase = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
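# Example invocation (the script filename is illustrative; the URL must be one of
# the two checkpoints listed in the --checkpoint_url choices above):
#
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection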
| 37 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( UpperCamelCase = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
lowerCAmelCase__ : str = set()
# Replace all the whitespace in our sentence
lowerCAmelCase__ : Tuple = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(UpperCamelCase ) == 26
def _SCREAMING_SNAKE_CASE ( UpperCamelCase = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
lowerCAmelCase__ : Any = [False] * 26
for char in input_str:
if char.islower():
lowerCAmelCase__ : Optional[Any] = True
elif char.isupper():
lowerCAmelCase__ : Any = True
return all(UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
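# All three functions answer the same question: does the sentence use every letter
# a-z at least once? Per the benchmark setup string below, their original names are
# is_pangram, is_pangram_faster and is_pangram_fastest. The default argument is the
# classic pangram, so each returns True with no arguments and False for inputs such
# as "hello world".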
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
from timeit import timeit
lowerCAmelCase__ : Union[str, Any] = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
print(timeit("""is_pangram()""" , setup=UpperCamelCase ) )
print(timeit("""is_pangram_faster()""" , setup=UpperCamelCase ) )
print(timeit("""is_pangram_fastest()""" , setup=UpperCamelCase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 37 | 1 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowercase_ = get_logger(__name__)
class SCREAMING_SNAKE_CASE :
_UpperCamelCase : Any = 'dummy_data'
_UpperCamelCase : str = 'datasets'
_UpperCamelCase : Any = False
def __init__( self : Optional[int] , a : str , a : str , a : Union[Version, str] , a : Optional[str] = None , a : bool = False , a : bool = True , a : Optional[List[Callable]] = None , )-> int:
"""simple docstring"""
lowercase__ = 0
lowercase__ = dataset_name
lowercase__ = cache_dir
lowercase__ = use_local_dummy_data
lowercase__ = config
# download_callbacks take a single url as input
lowercase__ = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase__ = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase__ = str(a )
# to be downloaded
lowercase__ = None
lowercase__ = None
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
if self._dummy_file is None:
lowercase__ = self.download_dummy_data()
return self._dummy_file
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> int:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase__ = cached_path(
a , cache_dir=self.cache_dir , extract_compressed_file=a , force_extract=a )
return os.path.join(a , self.dummy_file_name )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> List[Any]:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
if self._bucket_url is None:
lowercase__ = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def SCREAMING_SNAKE_CASE_ ( self : str , a : Optional[int] , *a : List[str] )-> List[str]:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase__ = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase__ = self.dummy_file_name
# special case when data_url is a dict
if isinstance(a , a ):
return self.create_dummy_data_dict(a , a )
elif isinstance(a , (list, tuple) ):
return self.create_dummy_data_list(a , a )
else:
return self.create_dummy_data_single(a , a )
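# This dispatcher mirrors the real datasets DownloadManager: data_url may be a
# single URL, a list/tuple of URLs, or a {split_name: url} dict, and the returned
# dummy paths keep the same shape as the input so dataset scripts work unchanged.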
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Union[str, Any] , *a : Optional[Any] )-> Optional[int]:
"""simple docstring"""
return self.download_and_extract(a )
def SCREAMING_SNAKE_CASE_ ( self : int , a : int , a : List[str] )-> List[Any]:
"""simple docstring"""
return self.download_and_extract(a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : Tuple , *a : int , **a : List[str] )-> Tuple:
"""simple docstring"""
return path
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Any:
"""simple docstring"""
return {}
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : Optional[int] , a : Union[str, Any] )-> int:
"""simple docstring"""
lowercase__ = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(a , a ):
for single_url in single_urls:
download_callback(a )
else:
lowercase__ = single_urls
download_callback(a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(a , a ):
lowercase__ = [os.path.join(a , urllib.parse.quote_plus(Path(a ).name ) ) for x in single_urls]
else:
lowercase__ = single_urls
lowercase__ = os.path.join(a , urllib.parse.quote_plus(Path(a ).name ) )
lowercase__ = value
# make sure that values are unique
if all(isinstance(a , a ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase__ = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : List[Any] , a : Dict )-> Tuple:
"""simple docstring"""
lowercase__ = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase__ = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , a ) ) for url in data_url )
lowercase__ = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase__ = [data_url[0]] * len(a )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase__ = os.path.join(a , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(a )
return dummy_data_list
def SCREAMING_SNAKE_CASE_ ( self : int , a : Optional[int] , a : Optional[int] )-> Optional[Any]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase__ = os.path.join(a , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(a ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> str:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : int , a : List[str] )-> int:
"""simple docstring"""
def _iter_archive_members(a : Union[str, Any] ):
# this preserves the order of the members inside the ZIP archive
lowercase__ = Path(self.dummy_file ).parent
lowercase__ = path.relative_to(a )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase__ = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(a )
lowercase__ = Path(a )
lowercase__ = _iter_archive_members(a ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(a ).as_posix(), file_path.open('rb' )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[str] )-> int:
"""simple docstring"""
if not isinstance(a , a ):
lowercase__ = [paths]
for path in paths:
if os.path.isfile(a ):
if os.path.basename(a ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(a ):
if os.path.basename(a ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(a ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(a , a )
| 269 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowercase__ = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
'decoder.output_projection.weight',
]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Dict:
lowercase__ , lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE )
lowercase__ = emb.weight.data
return lin_layer
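# Standard weight-tying trick: the returned nn.Linear (no bias) shares its weight
# data with the token embedding matrix, so the LM head reuses the embeddings
# instead of learning a separate output projection.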
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="facebook/mbart-large-en-ro" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ) -> str:
lowercase__ = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
remove_ignore_keys_(_SCREAMING_SNAKE_CASE )
lowercase__ = state_dict['encoder.embed_tokens.weight'].shape[0]
lowercase__ = MBartConfig.from_pretrained(_SCREAMING_SNAKE_CASE , vocab_size=_SCREAMING_SNAKE_CASE )
if mbart_aa and finetuned:
lowercase__ = 'relu'
lowercase__ = state_dict['decoder.embed_tokens.weight']
lowercase__ = MBartForConditionalGeneration(_SCREAMING_SNAKE_CASE )
model.model.load_state_dict(_SCREAMING_SNAKE_CASE )
if finetuned:
lowercase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
lowercase_ = parser.parse_args()
lowercase_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 269 | 1 |
'''simple docstring'''
import os
def A_ ( ):
with open(os.path.dirname(snake_case ) + "/p022_names.txt" ) as file:
SCREAMING_SNAKE_CASE:Tuple = str(file.readlines()[0] )
SCREAMING_SNAKE_CASE:List[Any] = names.replace("\"" , "" ).split("," )
names.sort()
SCREAMING_SNAKE_CASE:List[str] = 0
SCREAMING_SNAKE_CASE:Optional[Any] = 0
for i, name in enumerate(snake_case ):
for letter in name:
name_score += ord(snake_case ) - 64
total_score += (i + 1) * name_score
SCREAMING_SNAKE_CASE:Optional[Any] = 0
return total_score
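# Scoring per Project Euler 22: each letter contributes its alphabetical value
# (ord(letter) - 64, so A=1 ... Z=26), and the name score is multiplied by the
# name's 1-based position in the sorted list. For example, COLIN = 3+15+12+9+14 = 53;
# at position 938 it contributes 938 * 53 = 49714.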
if __name__ == "__main__":
print(solution())
| 139 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
A_ = logging.get_logger(__name__)
class _snake_case ( _a ):
def __init__( self : Optional[Any] ,*SCREAMING_SNAKE_CASE__ : Dict ,**SCREAMING_SNAKE_CASE__ : Tuple ):
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." ,SCREAMING_SNAKE_CASE__ ,)
super().__init__(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
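# Deprecation shim: the old feature-extractor name is kept as a thin subclass that
# emits a deprecation warning and otherwise behaves exactly like GLPNImageProcessor.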
| 139 | 1 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowercase__ : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
"""simple docstring"""
@register_to_config
def __init__( self : Tuple , lowerCAmelCase__ : bool , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[int] = None ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_UpperCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
_UpperCamelCase = torch.zeros(lowerCAmelCase__ , lowerCAmelCase__ )
else:
_UpperCamelCase = None
_UpperCamelCase = torch.nn.Parameter(lowerCAmelCase__ )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : VQModel
_snake_case : CLIPTextModel
_snake_case : CLIPTokenizer
_snake_case : TransformeraDModel
_snake_case : LearnedClassifierFreeSamplingEmbeddings
_snake_case : VQDiffusionScheduler
def __init__( self : Dict , lowerCAmelCase__ : VQModel , lowerCAmelCase__ : CLIPTextModel , lowerCAmelCase__ : CLIPTokenizer , lowerCAmelCase__ : TransformeraDModel , lowerCAmelCase__ : VQDiffusionScheduler , lowerCAmelCase__ : LearnedClassifierFreeSamplingEmbeddings , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
vqvae=lowerCAmelCase__ , transformer=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , learned_classifier_free_sampling_embeddings=lowerCAmelCase__ , )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any ) -> int:
'''simple docstring'''
_UpperCamelCase = len(lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else 1
# get prompt text embeddings
_UpperCamelCase = self.tokenizer(
lowerCAmelCase__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
_UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
_UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
_UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
_UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=lowerCAmelCase__ )
# duplicate text embeddings for each generation per prompt
_UpperCamelCase = prompt_embeds.repeat_interleave(lowerCAmelCase__ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
_UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
_UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(lowerCAmelCase__ , 1 , 1 )
else:
_UpperCamelCase = [''''''] * batch_size
_UpperCamelCase = text_input_ids.shape[-1]
_UpperCamelCase = self.tokenizer(
lowerCAmelCase__ , padding='''max_length''' , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='''pt''' , )
_UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
_UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=lowerCAmelCase__ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_UpperCamelCase = negative_prompt_embeds.shape[1]
_UpperCamelCase = negative_prompt_embeds.repeat(1 , lowerCAmelCase__ , 1 )
_UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowerCAmelCase__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
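# When classifier-free guidance is enabled, the returned batch stacks the negative
# (unconditional) and positive prompt embeddings; __call__ later recombines the two
# model outputs as uncond + guidance_scale * (text - uncond), extrapolating away
# from the unconditional prediction whenever guidance_scale > 1.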
@torch.no_grad()
def __call__( self : str , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : int = 100 , lowerCAmelCase__ : float = 5.0 , lowerCAmelCase__ : float = 1.0 , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase__ : int = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = 1
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = len(lowerCAmelCase__ )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase__ )}""" )
_UpperCamelCase = batch_size * num_images_per_prompt
_UpperCamelCase = guidance_scale > 1.0
_UpperCamelCase = self._encode_prompt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(lowerCAmelCase__ )}.""" )
# get the initial completely masked latents unless the user supplied it
_UpperCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
_UpperCamelCase = self.transformer.num_vector_embeds - 1
_UpperCamelCase = torch.full(lowerCAmelCase__ , lowerCAmelCase__ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'''Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'''
f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
_UpperCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCAmelCase__ , device=self.device )
_UpperCamelCase = self.scheduler.timesteps.to(self.device )
_UpperCamelCase = latents
for i, t in enumerate(self.progress_bar(lowerCAmelCase__ ) ):
# expand the sample if we are doing classifier free guidance
_UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
_UpperCamelCase = self.transformer(lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , timestep=lowerCAmelCase__ ).sample
if do_classifier_free_guidance:
_UpperCamelCase , _UpperCamelCase = model_output.chunk(2 )
_UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(lowerCAmelCase__ , dim=1 , keepdim=lowerCAmelCase__ )
_UpperCamelCase = self.truncate(lowerCAmelCase__ , lowerCAmelCase__ )
# remove `log(0)`'s (`-inf`s)
_UpperCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
_UpperCamelCase = self.scheduler.step(lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = self.vqvae.config.vq_embed_dim
_UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
_UpperCamelCase = self.vqvae.quantize.get_codebook_entry(lowerCAmelCase__ , shape=lowerCAmelCase__ )
_UpperCamelCase = self.vqvae.decode(lowerCAmelCase__ , force_not_quantize=lowerCAmelCase__ ).sample
_UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCamelCase = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase__ )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : torch.FloatTensor , lowerCAmelCase__ : float ) -> torch.FloatTensor:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = torch.sort(lowerCAmelCase__ , 1 , descending=lowerCAmelCase__ )
_UpperCamelCase = torch.exp(lowerCAmelCase__ )
_UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
_UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , lowerCAmelCase__ )
_UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
_UpperCamelCase = keep_mask[:, :-1, :]
_UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
_UpperCamelCase = log_p_x_0.clone()
_UpperCamelCase = -torch.inf # -inf = log(0)
return rv
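# truncate() implements VQ-Diffusion's truncation sampling: per location the class
# log-probabilities are sorted, classes are kept while the cumulative probability
# stays below truncation_rate (the single most likely class is always kept), and
# every discarded class is set to -inf, i.e. log(0), before the scheduler samples.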
| 287 |
'''simple docstring'''
import random
class __lowerCAmelCase :
"""simple docstring"""
@staticmethod
def snake_case__ ( lowerCAmelCase__ : str ) -> tuple[list[int], list[int]]:
'''simple docstring'''
_UpperCamelCase = [ord(lowerCAmelCase__ ) for i in text]
_UpperCamelCase = []
_UpperCamelCase = []
for i in plain:
_UpperCamelCase = random.randint(1 , 300 )
_UpperCamelCase = (i + k) * k
cipher.append(lowerCAmelCase__ )
key.append(lowerCAmelCase__ )
return cipher, key
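# Encryption computes c = (p + k) * k with a fresh random k per character;
# decrypt() below inverts it exactly: (c - k**2) / k = ((p + k) * k - k**2) / k = p.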
@staticmethod
def snake_case__ ( lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] ) -> str:
'''simple docstring'''
_UpperCamelCase = []
for i in range(len(lowerCAmelCase__ ) ):
_UpperCamelCase = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(lowerCAmelCase__ ) )
return "".join(lowerCAmelCase__ )
if __name__ == "__main__":
lowercase__ , lowercase__ : List[str] = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
| 287 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowercase__ = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=UpperCamelCase , cache_dir=UpperCamelCase )
lowercase__ = [t[-1] for t in os.walk(os.path.join(UpperCamelCase , os.listdir(UpperCamelCase )[0] , '''snapshots''' ) )]
lowercase__ = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=UpperCamelCase )
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 4
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(UpperCamelCase )
# shard inputs and rng
lowercase__ = replicate(UpperCamelCase )
lowercase__ = jax.random.split(UpperCamelCase , UpperCamelCase )
lowercase__ = shard(UpperCamelCase )
lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3
assert np.abs(np.abs(UpperCamelCase , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1
lowercase__ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(UpperCamelCase ) == num_samples
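# The replicate / split / shard sequence above is the standard JAX data-parallel
# recipe: parameters are copied to every device, the PRNG key is split so each
# device samples independently, and the tokenized prompts are reshaped to
# (num_devices, per_device_batch, ...) for the pmapped pipeline call.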
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=UpperCamelCase )
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(UpperCamelCase )
# shard inputs and rng
lowercase__ = replicate(UpperCamelCase )
lowercase__ = jax.random.split(UpperCamelCase , UpperCamelCase )
lowercase__ = shard(UpperCamelCase )
lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
def UpperCamelCase__ (self : str ):
'''simple docstring'''
lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=UpperCamelCase )
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(UpperCamelCase )
# shard inputs and rng
lowercase__ = replicate(UpperCamelCase )
lowercase__ = jax.random.split(UpperCamelCase , UpperCamelCase )
lowercase__ = shard(UpperCamelCase )
lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase__ (self : str ):
'''simple docstring'''
lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(UpperCamelCase )
# shard inputs and rng
lowercase__ = replicate(UpperCamelCase )
lowercase__ = jax.random.split(UpperCamelCase , UpperCamelCase )
lowercase__ = shard(UpperCamelCase )
lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , set_alpha_to_one=UpperCamelCase , steps_offset=1 , )
lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=UpperCamelCase , safety_checker=UpperCamelCase , )
lowercase__ = scheduler.create_state()
lowercase__ = scheduler_state
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(UpperCamelCase )
# shard inputs and rng
lowercase__ = replicate(UpperCamelCase )
lowercase__ = jax.random.split(UpperCamelCase , UpperCamelCase )
lowercase__ = shard(UpperCamelCase )
lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def UpperCamelCase__ (self : str ):
'''simple docstring'''
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = jax.random.split(jax.random.PRNGKey(0 ) , UpperCamelCase )
lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=UpperCamelCase , )
lowercase__ = replicate(UpperCamelCase )
lowercase__ = pipeline.prepare_inputs(UpperCamelCase )
lowercase__ = shard(UpperCamelCase )
lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
lowercase__ = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
lowercase__ ,lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=UpperCamelCase , use_memory_efficient_attention=UpperCamelCase , )
lowercase__ = replicate(UpperCamelCase )
lowercase__ = pipeline.prepare_inputs(UpperCamelCase )
lowercase__ = shard(UpperCamelCase )
lowercase__ = pipeline(UpperCamelCase , UpperCamelCase , UpperCamelCase , jit=UpperCamelCase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
lowercase__ = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 2 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_a = _symbol_database.Default()
_a = _descriptor_pool.Default().AddSerializedFile(
    b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18.'
    b' \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_a = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
_a = None
_a = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_a = 45
_a = 15_81
_a = 15_17
_a = 15_70
_a = 15_84
_a = 17_93
_a = 17_95
_a = 19_16
_a = 18_64
_a = 19_05
_a = 19_19
_a = 24_29
_a = 22_08
_a = 24_18
_a = 23_23
_a = 24_07
# @@protoc_insertion_point(module_scope)
| 17 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ (__snake_case , __snake_case ):
__lowerCamelCase : Dict = """convnextv2"""
def __init__( self , a=3 , a=4 , a=4 , a=None , a=None , a="gelu" , a=0.02 , a=1e-12 , a=0.0 , a=224 , a=None , a=None , **a , ):
super().__init__(**a)
lowercase__ : Tuple = num_channels
lowercase__ : List[Any] = patch_size
lowercase__ : Any = num_stages
lowercase__ : Dict = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
lowercase__ : Tuple = [3, 3, 9, 3] if depths is None else depths
lowercase__ : str = hidden_act
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Optional[int] = layer_norm_eps
lowercase__ : Dict = drop_path_rate
lowercase__ : Any = image_size
lowercase__ : Dict = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(self.depths) + 1)]
lowercase__ , lowercase__ : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=a , out_indices=a , stage_names=self.stage_names)
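# Because the config mixes in BackboneConfigMixin, the user-supplied out_features /
# out_indices are reconciled against the stage names above ("stem", "stage1", ...),
# letting detection and segmentation heads select intermediate feature maps by name.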
| 216 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case_ = {'''configuration_encoder_decoder''': ['''EncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['''EncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['''TFEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['''FlaxEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
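# Lazy-import pattern: under TYPE_CHECKING the concrete classes are imported for
# static analysis, while at runtime the module is replaced by a _LazyModule so
# torch / TensorFlow / Flax are only imported when the matching class is accessed.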
| 216 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCAmelCase = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
for attribute in key.split(""".""" ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowerCAmelCase__ : List[str] = '''lm_head'''
lowerCAmelCase__ : Tuple = getattr(__lowercase , __lowercase )
if weight_type is not None:
lowerCAmelCase__ : List[Any] = getattr(__lowercase , __lowercase ).shape
else:
lowerCAmelCase__ : List[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowerCAmelCase__ : Any = value
elif weight_type == "weight_g":
lowerCAmelCase__ : Dict = value
elif weight_type == "weight_v":
lowerCAmelCase__ : Tuple = value
elif weight_type == "bias":
lowerCAmelCase__ : int = value
else:
lowerCAmelCase__ : int = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : Dict = fairseq_model.state_dict()
lowerCAmelCase__ : List[Any] = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase__ : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
__lowercase , __lowercase , __lowercase , __lowercase , hf_model.config.feat_extract_norm == """group""" , )
lowerCAmelCase__ : Dict = True
else:
for key, mapped_key in MAPPING.items():
lowerCAmelCase__ : Tuple = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
lowerCAmelCase__ : Union[str, Any] = True
if "*" in mapped_key:
lowerCAmelCase__ : Optional[int] = name.split(__lowercase )[0].split(""".""" )[-2]
lowerCAmelCase__ : Union[str, Any] = mapped_key.replace("""*""" , __lowercase )
if "weight_g" in name:
lowerCAmelCase__ : Union[str, Any] = '''weight_g'''
elif "weight_v" in name:
lowerCAmelCase__ : str = '''weight_v'''
elif "bias" in name:
lowerCAmelCase__ : Optional[int] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCAmelCase__ : Tuple = '''weight'''
else:
lowerCAmelCase__ : Tuple = None
set_recursively(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
continue
if not is_used:
unused_weights.append(__lowercase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = full_name.split("""conv_layers.""" )[-1]
lowerCAmelCase__ : List[str] = name.split(""".""" )
lowerCAmelCase__ : List[str] = int(items[0] )
lowerCAmelCase__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCAmelCase__ : str = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCAmelCase__ : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCAmelCase__ : Dict = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCAmelCase__ : Optional[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__lowercase )
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq UniSpeech weights into the transformers design.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
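    # A hypothetical invocation, shown only to illustrate how the flags above
    # fit together (script name and paths are placeholders, not real files):
    #
    #   python convert_unispeech_checkpoint.py \
    #       --checkpoint_path /path/to/unispeech.pt \
    #       --dict_path /path/to/dict.ltr.txt \
    #       --pytorch_dump_folder_path ./unispeech-converted
    #
    # Without --not_finetuned the checkpoint is treated as a fine-tuned CTC
    # model; passing --not_finetuned converts a pretraining checkpoint instead.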
| 37 |
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the substring-divisibility property of Project Euler problem 43."""
    # d2 d3 d4 must be divisible by 2, i.e. d4 is even
    if num[3] % 2 != 0:
        return False

    # d3 d4 d5 must be divisible by 3, i.e. its digit sum is divisible by 3
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    # d4 d5 d6 must be divisible by 5, i.e. d6 is 0 or 5
    if num[5] % 5 != 0:
        return False

    # d5 d6 d7, d6 d7 d8, d7 d8 d9 and d8 d9 d10 must be divisible by 7, 11, 13 and 17
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum of all 0-9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f'{solution() = }')
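# Sanity check (a sketch): 1406357289 is the pandigital example from the
# problem statement, so it must satisfy every divisibility test above.
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))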
| 319 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
def _UpperCamelCase ( self : Dict ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
_UpperCamelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
_UpperCamelCase = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(__UpperCamelCase ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(__UpperCamelCase ) )
def _UpperCamelCase ( self : str , __UpperCamelCase : Union[str, Any] ) -> List[Any]:
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = '''lower newer'''
return input_text, output_text
def _UpperCamelCase ( self : List[str] ) -> str:
_UpperCamelCase = BioGptTokenizer(self.vocab_file , self.merges_file )
_UpperCamelCase = '''lower'''
_UpperCamelCase = ['''low''', '''er</w>''']
_UpperCamelCase = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCamelCase = tokens + ['''<unk>''']
_UpperCamelCase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
@slow
def _UpperCamelCase ( self : str ) -> Tuple:
_UpperCamelCase = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
_UpperCamelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__UpperCamelCase )
_UpperCamelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__UpperCamelCase )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase , __UpperCamelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 54 | """simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
def _UpperCamelCase ( self : Dict ) -> str:
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCamelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
_UpperCamelCase = model_class(__UpperCamelCase )
@jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)
with self.subTest('''JIT Enabled''' ):
_UpperCamelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_UpperCamelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _UpperCamelCase ( self : Optional[Any] ) -> Dict:
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCamelCase = model_class(__UpperCamelCase )
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
_UpperCamelCase = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )
with self.subTest('''JIT Enabled''' ):
_UpperCamelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_UpperCamelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _UpperCamelCase ( self : Union[str, Any] ) -> Dict:
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=__UpperCamelCase )
_UpperCamelCase = np.ones((1, 1) )
_UpperCamelCase = model(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@slow
def _UpperCamelCase ( self : str ) -> Any:
_UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
_UpperCamelCase = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
_UpperCamelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
_UpperCamelCase = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
_UpperCamelCase = tokenizer(__UpperCamelCase , return_tensors='''np''' , truncation=__UpperCamelCase , max_length=512 , padding=__UpperCamelCase )
_UpperCamelCase = model.generate(**__UpperCamelCase , num_beams=2 ).sequences
_UpperCamelCase = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
assert tgt_text == decoded
| 54 | 1 |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Distribute `num_shards` shard indices over at most `max_num_jobs` groups."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into `max_num_jobs` gen_kwargs."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Concatenate the list-valued entries of several gen_kwargs back into one."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Shuffle all the lists; lists of the same size get the same shuffling."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
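

# A small usage sketch (assuming the upstream function names restored above):
# one list of data files is split across two jobs and merged back unchanged.
if __name__ == "__main__":
    gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt"], "split": "train"}
    jobs = _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
    # 3 shards over 2 jobs -> [2 shards, 1 shard]; the non-list value is copied
    assert jobs == [
        {"files": ["a.txt", "b.txt"], "split": "train"},
        {"files": ["c.txt"], "split": "train"},
    ]
    assert _merge_gen_kwargs(jobs) == gen_kwargs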
| 199 |
from sklearn.metrics import mean_squared_error
import datasets
lowerCamelCase = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
lowerCamelCase = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
lowerCamelCase = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
"""simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )
    def _get_feature_types(self):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            predictions, references, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 199 | 1 |
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change over [a, b] guarantees a root inside
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
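
    # One more bracket, as a sketch: equation has roots at +/- sqrt(10) ~ 3.1623,
    # and the midpoint returned after the loop is within the 0.01 tolerance.
    import math

    assert abs(bisection(2, 5) - math.sqrt(10)) < 0.01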
| 351 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
@staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
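

# A standalone sketch of the expansion above: the default spec repeats the
# ("global", "local") pair 12 times, giving one attention type per layer of a
# 24-layer model (demo names below are illustrative only):
_demo_expanded = GPTNeoConfig.expand_attention_types_params([[["global", "local"], 12]])
assert _demo_expanded == ["global", "local"] * 12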
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """ONNX-friendly version: find the largest divisor of `seq_length` below `window_size`."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
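

# A quick equivalence check for the ONNX-export helpers defined above
# (a sketch; requires torch at runtime):
if __name__ == "__main__":
    import torch

    x = torch.arange(20).view(2, 10)
    assert torch.equal(custom_unfold(x, 1, 4, 2), x.unfold(1, 4, 2))
    # 5 is the largest divisor of 10 below a window size of 7 -> 2 blocks
    block_length, num_blocks = custom_get_block_length_and_num_blocks(10, 7)
    assert int(block_length) == 5 and int(num_blocks) == 2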
| 52 | 0 |
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    """Sum of the digits in the number num! (Project Euler problem 20)."""
    return sum(int(x) for x in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
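
    # Worked example: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
    assert solution(10) == 27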
| 4 | from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" )
# forward pass
UpperCamelCase = model(**lowerCamelCase_ )
# verify the logits
UpperCamelCase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
UpperCamelCase = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCamelCase_ , atol=1E-4 ) )
| 343 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 352 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False, False, False
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''') from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm'''):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''') is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''')
if value.get('''bytes'''):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767

                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''')}
elif value.get('''bytes''') is not None or value.get('''path''') is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
def __a ( self :Dict , _lowercase :dict , _lowercase :Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''')
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''') from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)

        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __a ( self :Union[str, Any]) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''')
return {
"bytes": Value('''binary'''),
"path": Value('''string'''),
}
def __a ( self :int , _lowercase :Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
if pa.types.is_string(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('''array'''):
UpperCAmelCase_ = pa.array([Audio().encode_example(_lowercase) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index('''bytes''') >= 0:
UpperCAmelCase_ = storage.field('''bytes''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
if storage.type.get_field_index('''path''') >= 0:
UpperCAmelCase_ = storage.field('''path''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
return array_cast(_lowercase , self.pa_type)
def __a ( self :Any , _lowercase :pa.StructArray) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_lowercase :Tuple):
with xopen(_lowercase , '''rb''') as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(_lowercase) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
return array_cast(_lowercase , self.pa_type)
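

# A minimal encode round trip, as a sketch (requires the optional
# ``soundfile`` dependency at runtime; the sine wave is illustrative only):
if __name__ == "__main__":
    sampling_rate = 16_000
    wave = np.sin(2 * np.pi * 440 * np.arange(sampling_rate) / sampling_rate).astype(np.float32)
    encoded = Audio().encode_example({"array": wave, "sampling_rate": sampling_rate})
    assert encoded["bytes"] is not None and encoded["path"] is None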
| 344 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 53 |
def is_power_of_two(number: int) -> bool:
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
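
    # Worked examples of the bit trick: a power of two has exactly one set bit,
    # so n & (n - 1) clears it to zero (e.g. 8 = 0b1000, 7 = 0b0111).
    # Edge case to be aware of: 0 & -1 == 0, so 0 is reported as a power of two.
    assert is_power_of_two(8) and not is_power_of_two(6)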
| 26 | 0 |
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by removing exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__('doctest').testmod()
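    # Quick examples, assuming the cleaned-up implementation above: for 152
    # the one-digit-removed candidates are 52, 12 and 15, so 52 wins.
    assert remove_digit(152) == 52
    assert remove_digit(1001) == 101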
| 119 | import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
lowerCAmelCase__ = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )
def __snake_case ( self : List[Any] , lowercase__ : Union[str, Any]=None):
'''simple docstring'''
lowerCAmelCase__ = self.train_dataset if calib_dataset is None else calib_dataset
lowerCAmelCase__ = self.get_calib_dataloader(lowercase__)
lowerCAmelCase__ = self.model
quant_trainer.configure_model(lowercase__ , self.quant_trainer_args , calib=lowercase__)
model.eval()
quant_trainer.enable_calibration(lowercase__)
logger.info('***** Running calibration *****')
logger.info(F""" Num examples = {self.calib_num}""")
logger.info(F""" Batch size = {calib_dataloader.batch_size}""")
for step, inputs in enumerate(lowercase__):
# Prediction step
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.prediction_step(lowercase__ , lowercase__ , prediction_loss_only=lowercase__)
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(lowercase__ , self.quant_trainer_args)
lowerCAmelCase__ = model
def __snake_case ( self : Optional[Any] , lowercase__ : List[Any]=None , lowercase__ : Optional[Any]=None , lowercase__ : List[Any]=None , lowercase__ : str = "eval"):
'''simple docstring'''
lowerCAmelCase__ = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCAmelCase__ = self.get_eval_dataloader(lowercase__)
lowerCAmelCase__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase__ = self.compute_metrics
lowerCAmelCase__ = None
lowerCAmelCase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase__ = eval_loop(
lowercase__ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase__ , )
finally:
lowerCAmelCase__ = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
lowerCAmelCase__ = self.post_process_function(lowercase__ , lowercase__ , output.predictions)
lowerCAmelCase__ = self.compute_metrics(lowercase__)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"""{metric_key_prefix}_"""):
lowerCAmelCase__ = metrics.pop(lowercase__)
self.log(lowercase__)
else:
lowerCAmelCase__ = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
lowerCAmelCase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase__)
return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix: str = "test"):
        '''Run prediction and, when possible, post-process and score the outputs.'''
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , 'predict')
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(F"""{metric_key_prefix}_"""):
                metrics[F"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics)
    def save_onnx( self , output_dir="./"):
        '''Export the fake-quantized model to ONNX with dynamic batch/sequence axes.'''
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))
        # saving device - to make it consistent
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())
        logger.info('Converting model to be onnx compatible')
        from pytorch_quantization.nn import TensorQuantizer
        TensorQuantizer.use_fb_fake_quant = True
        model = self.model.to(device)
        model.eval()
        model.float()
        model_to_save = model.module if hasattr(model , 'module') else model
        quant_trainer.configure_model(model_to_save , self.quant_trainer_args)
        output_model_file = os.path.join(output_dir , 'model.onnx')
        logger.info(F"""exporting model to {output_model_file}""")
        axes = {0: 'batch_size', 1: 'seq_len'}
        torch.onnx.export(
            model_to_save , input_tuple , output_model_file , export_params=True , opset_version=13 , do_constant_folding=True , input_names=['input_ids', 'attention_mask', 'token_type_ids'] , output_names=['output_start_logits', 'output_end_logits'] , dynamic_axes={
                'input_ids': axes,
                'attention_mask': axes,
                'token_type_ids': axes,
                'output_start_logits': axes,
                'output_end_logits': axes,
            } , verbose=True , )
        logger.info('onnx export finished')
| 119 | 1 |
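# Added example (hedged): a self-contained sketch of the torch.onnx.export pattern
# used in save_onnx above -- dynamic batch/sequence axes on a tiny stand-in model.
# The TinyQA module is an assumption purely for illustration; the real script
# exports a calibrated, quantized QA model instead.
import torch


class TinyQA(torch.nn.Module):
    def __init__(self, vocab=100, dim=8):
        super().__init__()
        self.emb = torch.nn.Embedding(vocab, dim)
        self.head = torch.nn.Linear(dim, 2)  # per-token start/end logits

    def forward(self, input_ids, attention_mask, token_type_ids):
        logits = self.head(self.emb(input_ids))
        return logits[..., 0], logits[..., 1]


demo_model = TinyQA().eval()
ids = torch.randint(0, 100, (1, 16))
axes = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
    demo_model,
    (ids, torch.ones_like(ids), torch.zeros_like(ids)),
    "model.onnx",
    export_params=True,
    opset_version=13,
    do_constant_folding=True,
    input_names=["input_ids", "attention_mask", "token_type_ids"],
    output_names=["output_start_logits", "output_end_logits"],
    dynamic_axes={name: axes for name in ["input_ids", "attention_mask", "token_type_ids", "output_start_logits", "output_end_logits"]},
)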
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream ):
    def __init__(self , sql: Union[str, "sqlalchemy.sql.Selectable"] , con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , **kwargs , ):
        """Dataset reader that pulls a SQL query or table into a `Dataset`."""
        super().__init__(features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , **kwargs )
        self.builder = Sql(
            cache_dir=cache_dir , features=features , sql=sql , con=con , **kwargs , )

    def read(self ):
        """Download/prepare through the Sql builder and return the train split."""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split='''train''' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class SqlDatasetWriter:
    def __init__(self , dataset: Dataset , name: str , con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , batch_size: Optional[int] = None , num_proc: Optional[int] = None , **to_sql_kwargs , ):
        """Write a `Dataset` to a SQL table in batches, optionally in parallel."""
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self ):
        """Strip reader-only kwargs, then delegate to `_write` and return rows written."""
        _ = self.to_sql_kwargs.pop('''sql''' , None )
        _ = self.to_sql_kwargs.pop('''con''' , None )
        index = self.to_sql_kwargs.pop('''index''' , False )
        written = self._write(index=index , **self.to_sql_kwargs )
        return written
    def _batch_sql(self , args ):
        """Write one batch: later batches append to the table created by the first."""
        offset , index , to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name , self.con , index=index , **to_sql_kwargs )
        return num_rows or len(df )
    def _write(self , index , **to_sql_kwargs ):
        """Write the whole dataset, either sequentially or with a process pool."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
                written += self._batch_sql((offset, index, to_sql_kwargs) )
        else:
            num_rows , batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
                    written += num_rows
        return written
| 24 |
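# Added example (hedged): a self-contained sketch of the batched `to_sql` pattern
# the writer above implements -- slice rows into batches and append each batch to
# a SQLite table with pandas. Table and column names are assumptions for illustration.
import sqlite3

import pandas as pd

df = pd.DataFrame({"text": [f"row {i}" for i in range(10)], "label": [i % 2 for i in range(10)]})
con = sqlite3.connect(":memory:")
batch_size = 4
written = 0
for offset in range(0, len(df), batch_size):
    batch = df.iloc[offset : offset + batch_size]
    # the first batch creates the table; later batches must append to it
    if_exists = "append" if offset > 0 else "fail"
    batch.to_sql("data", con, index=False, if_exists=if_exists)
    written += len(batch)
print(written, con.execute("SELECT COUNT(*) FROM data").fetchone()[0])  # 10 10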
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
snake_case_ = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    freeze_encoder: bool = field(default=False , metadata={'help': 'Whether to freeze the encoder.'} )
    freeze_embeds: bool = field(default=False , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    task: Optional[str] = field(
        default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
    max_source_length: Optional[int] = field(
        default=1_024 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    max_target_length: Optional[int] = field(
        default=128 , metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    val_max_target_length: Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        } , )
    test_max_target_length: Optional[int] = field(
        default=142 , metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    n_train: Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
    n_val: Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
    n_test: Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
    src_lang: Optional[str] = field(default=None , metadata={'help': 'Source language id for translation.'} )
    tgt_lang: Optional[str] = field(default=None , metadata={'help': 'Target language id for translation.'} )
    eval_beams: Optional[int] = field(default=None , metadata={'help': '# num_beams to use for evaluation.'} )
    ignore_pad_token_for_loss: bool = field(
        default=True , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def handle_metrics( split , metrics , output_dir ):
    logger.info(f"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(f"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , f"""{split}_results.json""" ) )


def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fp16 , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
# use task specific params
    use_task_specific_params(model , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
        freeze_embeds(model )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
        if training_args.do_predict
        else None
    )
# Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=SeqaSeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics['''train_n_objs'''] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('''train''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(metric_key_prefix='''val''' )
        metrics['''val_n_objs'''] = data_args.n_val
        metrics['''val_loss'''] = round(metrics['''val_loss'''] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics('''val''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix='''test''' )
        metrics = test_output.metrics
        metrics['''test_n_objs'''] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics['''test_loss'''] = round(metrics['''test_loss'''] , 4 )
            handle_metrics('''test''' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , '''all_results.json''' ) )
    return all_metrics
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 24 | 1 |
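# Added example (hedged): a minimal, self-contained sketch of the HfArgumentParser
# pattern the script above relies on -- declare arguments as dataclasses and parse
# them into typed objects. The dataclass fields here are illustrative assumptions.
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser


@dataclass
class DemoModelArgs:
    model_name_or_path: str = field(metadata={"help": "Model id or local path"})
    freeze_encoder: bool = field(default=False, metadata={"help": "Freeze the encoder"})


@dataclass
class DemoDataArgs:
    data_dir: str = field(metadata={"help": "Directory with train/val/test files"})
    max_source_length: Optional[int] = field(default=1_024)


parser = HfArgumentParser((DemoModelArgs, DemoDataArgs))
model_args, data_args = parser.parse_args_into_dataclasses(
    args=["--model_name_or_path", "t5-small", "--data_dir", "./data"]
)
print(model_args.model_name_or_path, data_args.max_source_length)  # t5-small 1024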
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']


class TokenizedDataset(IterableDataset ):
    def __init__( self , tokenizer , dataset , n_tasks=None , n_copies=1 ):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__( self ):
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
        outputs = self.tokenizer(prompts , padding=True , return_tensors="pt" )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria ):
    def __init__( self , start_length , eof_strings , tokenizer ):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__( self , input_ids , scores , **kwargs ):
        """Returns True once every generated sequence contains an end-of-function string."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def remove_last_block( code ):
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS ) , code )
    # last string should be ""
    return "".join(string_list[:-2] )
def complete_code( accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch["ids"][:, : batch["input_len"]] ,num_return_sequences=batch_size ,**gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens ,dim=1 ,pad_index=tokenizer.pad_token_id )
            generated_tokens , generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks ,generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s ,skip_special_tokens=True ,clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed ,device_specific=True )
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 ,EOF_STRINGS ,tokenizer )] ),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval" )
    code_eval_metric = load_metric("code_eval" )
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer ,human_eval["test"] ,n_copies=n_copies ,n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized ,batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""] ,predictions=[[""]] )
    except ValueError as exception:
        print(
            "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
            " flag to enable code evaluation." )
        raise exception
    model , human_eval_loader = accelerator.prepare(model ,human_eval_loader )
    generations = complete_code(
        accelerator ,model ,tokenizer ,human_eval_loader ,n_tasks=n_tasks ,batch_size=args.batch_size ,**gen_kwargs ,)
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks ) ):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"""check({human_eval['test'][task]['entry_point']})"""
            references.append("\n" + test_func + "\n" + entry_point )
        # Evaluate completions with "code_eval" metric
        pass_at_k , _ = code_eval_metric.compute(
            references=references ,predictions=generations ,num_workers=args.num_workers )
        print(f"""Results: {pass_at_k}""" )
        # Save results to json file
        with open(args.output_file ,"w" ) as fp:
            json.dump(pass_at_k ,fp )


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 253 |
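# Added example (hedged): a self-contained demo of the stop-string truncation that
# `remove_last_block` above performs -- split the generation on the EOF markers and
# drop everything from the last marker onward. The sample completion is invented.
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


def truncate_at_stop_string(completion: str) -> str:
    pieces = re.split("(%s)" % "|".join(EOF_STRINGS), completion)
    # drop the final separator and whatever the model generated after it
    return "".join(pieces[:-2])


generated = "    return x + 1\n\nprint(add_one(3))"
print(repr(truncate_at_stop_string(generated)))  # '    return x + 1\n'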
"""simple docstring"""
def snake_case ( txt ):
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 253 | 1 |
"""simple docstring"""
__snake_case : List[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__snake_case : Optional[Any] = [{'type': 'code', 'content': INSTALL_CONTENT}]
__snake_case : List[str] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
} | 269 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
__snake_case : Any = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'mctct'
def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: str=8065 , _SCREAMING_SNAKE_CASE: str=1536 , _SCREAMING_SNAKE_CASE: str=36 , _SCREAMING_SNAKE_CASE: Optional[Any]=6144 , _SCREAMING_SNAKE_CASE: Optional[Any]=4 , _SCREAMING_SNAKE_CASE: Union[str, Any]=384 , _SCREAMING_SNAKE_CASE: Optional[Any]=920 , _SCREAMING_SNAKE_CASE: Union[str, Any]=1e-5 , _SCREAMING_SNAKE_CASE: List[Any]=0.3 , _SCREAMING_SNAKE_CASE: Optional[Any]="relu" , _SCREAMING_SNAKE_CASE: Optional[int]=0.02 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.3 , _SCREAMING_SNAKE_CASE: Dict=0.3 , _SCREAMING_SNAKE_CASE: List[Any]=1 , _SCREAMING_SNAKE_CASE: Optional[Any]=0 , _SCREAMING_SNAKE_CASE: List[str]=2 , _SCREAMING_SNAKE_CASE: Union[str, Any]=1 , _SCREAMING_SNAKE_CASE: Tuple=0.3 , _SCREAMING_SNAKE_CASE: Dict=1 , _SCREAMING_SNAKE_CASE: int=(7,) , _SCREAMING_SNAKE_CASE: str=(3,) , _SCREAMING_SNAKE_CASE: Union[str, Any]=80 , _SCREAMING_SNAKE_CASE: Tuple=1 , _SCREAMING_SNAKE_CASE: Dict=None , _SCREAMING_SNAKE_CASE: Tuple="sum" , _SCREAMING_SNAKE_CASE: List[str]=False , **_SCREAMING_SNAKE_CASE: Tuple , ) -> Tuple:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE , pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = vocab_size
__lowerCAmelCase : str = hidden_size
__lowerCAmelCase : str = num_hidden_layers
__lowerCAmelCase : str = intermediate_size
__lowerCAmelCase : List[Any] = num_attention_heads
__lowerCAmelCase : Dict = attention_head_dim
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : str = layer_norm_eps
__lowerCAmelCase : Tuple = layerdrop
__lowerCAmelCase : str = hidden_act
__lowerCAmelCase : List[Any] = initializer_range
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
__lowerCAmelCase : str = pad_token_id
__lowerCAmelCase : Optional[int] = bos_token_id
__lowerCAmelCase : Union[str, Any] = eos_token_id
__lowerCAmelCase : Any = conv_glu_dim
__lowerCAmelCase : Optional[int] = conv_dropout
__lowerCAmelCase : Union[str, Any] = num_conv_layers
__lowerCAmelCase : Optional[int] = input_feat_per_channel
__lowerCAmelCase : Union[str, Any] = input_channels
__lowerCAmelCase : Optional[Any] = conv_channels
__lowerCAmelCase : Dict = ctc_loss_reduction
__lowerCAmelCase : int = ctc_zero_infinity
# prevents config testing fail with exporting to json
__lowerCAmelCase : List[str] = list(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = list(_SCREAMING_SNAKE_CASE)
if len(self.conv_kernel) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
F"""but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""") | 269 | 1 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
snake_case : Optional[int] = 'sshleifer/bart-tiny-random'
snake_case : List[Any] = 'patrickvonplaten/t5-tiny-random'
@require_torch
class _snake_case ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
return AutoConfig.from_pretrained(lowercase_ )
def SCREAMING_SNAKE_CASE__ ( self ):
a , *a :Dict = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def SCREAMING_SNAKE_CASE__ ( self ):
a , *a :Optional[Any] = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=lowercase_ )
def SCREAMING_SNAKE_CASE__ ( self ):
a , *a :Tuple = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=lowercase_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def SCREAMING_SNAKE_CASE__ ( self ):
a , *a :Optional[int] = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def SCREAMING_SNAKE_CASE__ ( self ):
with self.assertRaises(lowercase_ ):
create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=lowercase_ , d=lowercase_ )
| 356 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_van'''] = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 281 | 0 |
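# Added example (hedged): a simplified stand-in showing how a lazy module like the
# one above defers imports -- attribute access triggers the real import on first
# use. transformers' actual `_LazyModule` is more elaborate; this is a sketch.
import importlib
import types


class DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._name_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


lazy_math = DemoLazyModule("lazy_math", {"math": ["sqrt", "pi"]})
print(lazy_math.sqrt(lazy_math.pi))  # math is imported only at this point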
demo_graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
def bfs_shortest_path( graph , start , goal ):
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance( graph , start , target ):
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(queue )
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
| 287 |
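# Added note (hedged): `queue.pop(0)` on a Python list is O(n) per dequeue, so the
# BFS above pays a quadratic penalty on long queues. A drop-in improvement is
# collections.deque; the small demo graph below is illustrative only.
from collections import deque


def bfs_shortest_path_deque(graph, start, goal):
    if start == goal:
        return [start]
    explored = {start}
    queue = deque([[start]])
    while queue:
        path = queue.popleft()  # O(1) instead of O(n)
        for neighbour in graph[path[-1]]:
            if neighbour == goal:
                return path + [neighbour]
            if neighbour not in explored:
                explored.add(neighbour)
                queue.append(path + [neighbour])
    return []


small_graph = {"A": ["B", "C"], "B": ["D"], "C": ["D"], "D": []}
print(bfs_shortest_path_deque(small_graph, "A", "D"))  # ['A', 'B', 'D']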
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """conditional_detr"""
_UpperCAmelCase : Optional[int] = ["""past_key_values"""]
_UpperCAmelCase : Optional[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , __magic_name__=True , __magic_name__=None , __magic_name__=3 , __magic_name__=3_0_0 , __magic_name__=6 , __magic_name__=2_0_4_8 , __magic_name__=8 , __magic_name__=6 , __magic_name__=2_0_4_8 , __magic_name__=8 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=True , __magic_name__="relu" , __magic_name__=2_5_6 , __magic_name__=0.1 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=False , __magic_name__="sine" , __magic_name__="resnet50" , __magic_name__=True , __magic_name__=False , __magic_name__=2 , __magic_name__=5 , __magic_name__=2 , __magic_name__=1 , __magic_name__=1 , __magic_name__=2 , __magic_name__=5 , __magic_name__=2 , __magic_name__=0.25 , **__magic_name__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCamelCase : Optional[int] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase : List[Any] = backbone_config.get("""model_type""" )
lowerCamelCase : Dict = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase : str = config_class.from_dict(__magic_name__ )
lowerCamelCase : Dict = use_timm_backbone
lowerCamelCase : str = backbone_config
lowerCamelCase : Tuple = num_channels
lowerCamelCase : Dict = num_queries
lowerCamelCase : Any = d_model
lowerCamelCase : Optional[Any] = encoder_ffn_dim
lowerCamelCase : List[str] = encoder_layers
lowerCamelCase : Union[str, Any] = encoder_attention_heads
lowerCamelCase : Any = decoder_ffn_dim
lowerCamelCase : Dict = decoder_layers
lowerCamelCase : Union[str, Any] = decoder_attention_heads
lowerCamelCase : Dict = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Union[str, Any] = activation_dropout
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : int = init_std
lowerCamelCase : str = init_xavier_std
lowerCamelCase : Tuple = encoder_layerdrop
lowerCamelCase : str = decoder_layerdrop
lowerCamelCase : Tuple = encoder_layers
lowerCamelCase : Optional[int] = auxiliary_loss
lowerCamelCase : Optional[Any] = position_embedding_type
lowerCamelCase : Optional[int] = backbone
lowerCamelCase : Union[str, Any] = use_pretrained_backbone
lowerCamelCase : str = dilation
# Hungarian matcher
lowerCamelCase : Optional[Any] = class_cost
lowerCamelCase : Dict = bbox_cost
lowerCamelCase : Tuple = giou_cost
# Loss coefficients
lowerCamelCase : Union[str, Any] = mask_loss_coefficient
lowerCamelCase : Dict = dice_loss_coefficient
lowerCamelCase : Optional[int] = cls_loss_coefficient
lowerCamelCase : Optional[int] = bbox_loss_coefficient
lowerCamelCase : Optional[int] = giou_loss_coefficient
lowerCamelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def UpperCamelCase__ ( self ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self ):
return self.d_model
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCamelCase : Optional[int] = self.backbone_config.to_dict()
lowerCamelCase : Optional[Any] = self.__class__.model_type
return output
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = version.parse("""1.11""")
@property
def UpperCamelCase__ ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def UpperCamelCase__ ( self ):
return 1e-5
@property
def UpperCamelCase__ ( self ):
return 1_2
| 287 | 1 |
'''simple docstring'''
import torch
def main() -> None:
    """Report how many CUDA devices are visible to this process."""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 345 | '''simple docstring'''
def different_signs( num_a: int , num_b: int ) -> bool:
    """Return True iff `num_a` and `num_b` have opposite signs (two's-complement XOR trick)."""
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 | 1 |
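# Added note: the XOR comparison above works because, in two's complement, the
# sign bit of (a ^ b) is the XOR of the two sign bits -- so the result is
# negative exactly when the operands' signs differ. Quick self-check:
for a, b in [(3, 5), (-3, 5), (3, -5), (-3, -5)]:
    print(a, b, (a ^ b) < 0)  # True only for the mixed-sign pairs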
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_albert'] = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_albert_fast'] = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_albert'] = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_albert'] = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_albert'] = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 216 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
lowercase__ =logging.get_logger(__name__)
lowercase__ ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase__ ={
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
lowercase__ ={
'junnyu/roformer_chinese_small': 1536,
'junnyu/roformer_chinese_base': 1536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
lowercase__ ={
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE : Optional[int] = RoFormerTokenizer
def __init__(self : List[str] , snake_case_ : Optional[int]=None , snake_case_ : str=None , snake_case_ : Optional[Any]=True , snake_case_ : str="[UNK]" , snake_case_ : Dict="[SEP]" , snake_case_ : Any="[PAD]" , snake_case_ : str="[CLS]" , snake_case_ : List[Any]="[MASK]" , snake_case_ : Any=True , snake_case_ : List[str]=None , **snake_case_ : Optional[int] , ):
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
__a : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , snake_case_ ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , snake_case_ ) != strip_accents
):
__a : List[str] = getattr(snake_case_ , pre_tok_state.pop('''type''' ) )
__a : Optional[Any] = do_lower_case
__a : Optional[int] = strip_accents
__a : List[str] = pre_tok_class(**snake_case_ )
__a : Optional[Any] = do_lower_case
def __getstate__(self : Union[str, Any] ):
__a : Any = self.__dict__.copy()
__a : Union[str, Any] = BertPreTokenizer()
return state
def __setstate__(self : Tuple , snake_case_ : Optional[Any] ):
__a : Dict = d
__a : str = self.__dict__['''_tokenizer'''].get_vocab()
__a : Optional[Any] = PreTokenizer.custom(JiebaPreTokenizer(snake_case_ ) )
def lowerCAmelCase (self : Optional[int] , snake_case_ : List[Any] , snake_case_ : Optional[Any]=None ):
__a : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase (self : Optional[int] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
__a : int = [self.sep_token_id]
__a : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase (self : int , snake_case_ : str , snake_case_ : Optional[str] = None ):
__a : Optional[Any] = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def lowerCAmelCase (self : Dict , snake_case_ : Dict , snake_case_ : Tuple=None , snake_case_ : Optional[Any]=None , snake_case_ : Union[str, Any]=False , **snake_case_ : Tuple , ):
__a : List[str] = BertPreTokenizer()
return super().save_pretrained(snake_case_ , snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
| 216 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Optional[int] = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class a__ ( UpperCamelCase__ ):
a : Optional[int] = """bridgetower_vision_model"""
def __init__( self , A=768 , A=12 , A=3 , A=16 , A=288 , A=1 , A=1e-05 , A=False , A=True , A=False , **A , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**A )
a = hidden_size
a = num_hidden_layers
a = num_channels
a = patch_size
a = image_size
a = initializer_factor
a = layer_norm_eps
a = stop_gradient
a = share_layernorm
a = remove_last_layer
@classmethod
def lowerCAmelCase_ ( cls , A , **A ) -> "PretrainedConfig":
'''simple docstring'''
a , a = cls.get_config_dict(A , **A )
if config_dict.get("model_type" ) == "bridgetower":
a = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(A , **A )
class a__ ( UpperCamelCase__ ):
a : List[Any] = """bridgetower_text_model"""
def __init__( self , A=50265 , A=768 , A=12 , A=12 , A=1 , A=3072 , A="gelu" , A=0.1 , A=0.1 , A=514 , A=1 , A=1e-05 , A=1 , A=0 , A=2 , A="absolute" , A=True , **A , ) -> int:
'''simple docstring'''
super().__init__(**A )
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = hidden_act
a = initializer_factor
a = intermediate_size
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = layer_norm_eps
a = position_embedding_type
a = use_cache
a = pad_token_id
a = bos_token_id
a = eos_token_id
@classmethod
def lowerCAmelCase_ ( cls , A , **A ) -> "PretrainedConfig":
'''simple docstring'''
a , a = cls.get_config_dict(A , **A )
if config_dict.get("model_type" ) == "bridgetower":
a = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(A , **A )
class a__ ( UpperCamelCase__ ):
a : str = """bridgetower"""
def __init__( self , A=True , A="gelu" , A=768 , A=1 , A=1e-05 , A=False , A="add" , A=12 , A=6 , A=False , A=False , A=None , A=None , **A , ) -> Dict:
'''simple docstring'''
a = kwargs.pop("text_config_dict" , A )
a = kwargs.pop("vision_config_dict" , A )
super().__init__(**A )
a = share_cross_modal_transformer_layers
a = hidden_act
a = hidden_size
a = initializer_factor
a = layer_norm_eps
a = share_link_tower_layers
a = link_tower_type
a = num_attention_heads
a = num_hidden_layers
a = tie_word_embeddings
a = init_layernorm_from_vision_encoder
if text_config is None:
a = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
a = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
a = BridgeTowerTextConfig(**A )
a = BridgeTowerVisionConfig(**A )
@classmethod
def lowerCAmelCase_ ( cls , A , A , **A ) -> str:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A )
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
a = copy.deepcopy(self.__dict__ )
a = self.text_config.to_dict()
a = self.vision_config.to_dict()
a = self.__class__.model_type
return output
| 370 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : int = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class a__ ( UpperCamelCase__ ):
a : Optional[Any] = """sew-d"""
def __init__( self , A=32 , A=768 , A=12 , A=12 , A=3072 , A=2 , A=512 , A=256 , A=True , A=True , A=("p2c", "c2p") , A="layer_norm" , A="gelu_python" , A=0.1 , A=0.1 , A=0.1 , A=0.0 , A=0.1 , A=0.0_2 , A=1e-7 , A=1e-5 , A="group" , A="gelu" , A=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , A=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A=False , A=128 , A=16 , A=True , A=0.0_5 , A=10 , A=2 , A=0.0 , A=10 , A=0 , A="mean" , A=False , A=False , A=256 , A=0 , A=1 , A=2 , **A , ) -> Dict:
'''simple docstring'''
super().__init__(**A , pad_token_id=A , bos_token_id=A , eos_token_id=A )
a = hidden_size
a = feat_extract_norm
a = feat_extract_activation
a = list(A )
a = list(A )
a = list(A )
a = conv_bias
a = num_conv_pos_embeddings
a = num_conv_pos_embedding_groups
a = len(self.conv_dim )
a = num_hidden_layers
a = intermediate_size
a = squeeze_factor
a = max_position_embeddings
a = position_buckets
a = share_att_key
a = relative_attention
a = norm_rel_ebd
a = list(A )
a = hidden_act
a = num_attention_heads
a = hidden_dropout
a = attention_dropout
a = activation_dropout
a = feat_proj_dropout
a = final_dropout
a = layer_norm_eps
a = feature_layer_norm_eps
a = initializer_range
a = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a = apply_spec_augment
a = mask_time_prob
a = mask_time_length
a = mask_time_min_masks
a = mask_feature_prob
a = mask_feature_length
a = mask_feature_min_masks
# ctc loss
a = ctc_loss_reduction
a = ctc_zero_infinity
# sequence classification
a = use_weighted_layer_sum
a = classifier_proj_size
@property
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 180 | 0 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ) -> str:
__SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
__SCREAMING_SNAKE_CASE = VideoClassificationPipeline(model=UpperCAmelCase__ , image_processor=UpperCAmelCase__ , top_k=2 )
__SCREAMING_SNAKE_CASE = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) -> Optional[int]:
for example in examples:
__SCREAMING_SNAKE_CASE = video_classifier(UpperCAmelCase__ )
self.assertEqual(
UpperCAmelCase__ , [
{"score": ANY(UpperCAmelCase__ ), "label": ANY(UpperCAmelCase__ )},
{"score": ANY(UpperCAmelCase__ ), "label": ANY(UpperCAmelCase__ )},
] , )
@require_torch
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
__SCREAMING_SNAKE_CASE = VideoMAEFeatureExtractor(
size={"shortest_edge": 1_0} , crop_size={"height": 1_0, "width": 1_0} )
__SCREAMING_SNAKE_CASE = pipeline(
"video-classification" , model=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , frame_sampling_rate=4 )
__SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
__SCREAMING_SNAKE_CASE = video_classifier(UpperCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}] , )
__SCREAMING_SNAKE_CASE = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(UpperCAmelCase__ , decimals=4 ) , [
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
] , )
@require_tf
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
pass
| 54 |
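# Added example (hedged): typical inference-side usage of the pipeline exercised
# by the tests above. The checkpoint id and the local file name are assumptions;
# any video-classification checkpoint and readable video would do, and decord
# must be installed for video decoding.
from transformers import pipeline

classifier = pipeline(
    "video-classification",
    model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
)
for pred in classifier("archery.mp4", top_k=2):
    print(f"{pred['label']}: {pred['score']:.4f}")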
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence , start = None , end = None ):
    '''Sort `sequence` in place between `start` and `end` using slowsort.'''
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end] , sequence[mid] = sequence[mid] , sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 54 | 1 |
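# Added note (hedged): slowsort is a deliberately pessimal "multiply and
# surrender" algorithm (Broder and Stolfi, 1984); its running time grows faster
# than any polynomial, so it is a curiosity rather than a practical sort.
# Quick usage check, assuming the `slowsort` defined above is in scope:
data = [5, 2, 4, 1, 3]
slowsort(data)
print(data)  # [1, 2, 3, 4, 5]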
'''simple docstring'''
from __future__ import annotations
from math import pi
def ind_reactance( inductance: float , frequency: float , reactance: float ) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 274 | '''simple docstring'''
def _lowerCamelCase ( lowerCamelCase_ : str ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = [0] * len(lowerCamelCase_ )
for i in range(1 , len(lowerCamelCase_ ) ):
# use last results for better performance - dynamic programming
UpperCAmelCase_ : List[Any] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
UpperCAmelCase_ : str = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
UpperCAmelCase_ : Any = j
return prefix_result
def _lowerCamelCase ( lowerCamelCase_ : str ):
"""simple docstring"""
return max(prefix_function(lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 274 | 1 |
import math
import flax.linen as nn
import jax.numpy as jnp
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1 , _lowerCAmelCase = 1 , _lowerCAmelCase = 1.0e4 , _lowerCAmelCase = False , _lowerCAmelCase = 1.0 , ) -> jnp.ndarray:
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
UpperCamelCase : List[str] = float(embedding_dim // 2 )
UpperCamelCase : List[str] = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
UpperCamelCase : List[Any] = min_timescale * jnp.exp(jnp.arange(_lowerCAmelCase , dtype=jnp.floataa ) * -log_timescale_increment )
UpperCamelCase : List[str] = jnp.expand_dims(_lowerCAmelCase , 1 ) * jnp.expand_dims(_lowerCAmelCase , 0 )
# scale embeddings
UpperCamelCase : int = scale * emb
if flip_sin_to_cos:
UpperCamelCase : Union[str, Any] = jnp.concatenate([jnp.cos(_lowerCAmelCase ), jnp.sin(_lowerCAmelCase )] , axis=1 )
else:
UpperCamelCase : Any = jnp.concatenate([jnp.sin(_lowerCAmelCase ), jnp.cos(_lowerCAmelCase )] , axis=1 )
UpperCamelCase : Optional[Any] = jnp.reshape(_lowerCAmelCase , [jnp.shape(_lowerCAmelCase )[0], embedding_dim] )
return signal
class A__ ( nn.Module ):
_UpperCAmelCase :int = 3_2
_UpperCAmelCase :jnp.dtype = jnp.floataa
@nn.compact
def __call__( self , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[int] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(A_ )
UpperCamelCase : Optional[int] = nn.silu(A_ )
UpperCamelCase : str = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(A_ )
return temb
class A__ ( nn.Module ):
_UpperCAmelCase :int = 3_2
_UpperCAmelCase :bool = False
_UpperCAmelCase :float = 1
@nn.compact
def __call__( self , A_ ):
'''simple docstring'''
return get_sinusoidal_embeddings(
A_ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 52 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__lowerCamelCase : Union[str, Any] = pytest.mark.integration
@require_faiss
class A__ ( __snake_case ):
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(A_ ) for x in np.arange(30 ).tolist()]} )
return dset
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Dataset = self._create_dummy_dataset()
UpperCamelCase : List[Any] = dset.map(
lambda A_ , A_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=A_ , keep_in_memory=A_ )
UpperCamelCase : List[str] = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
UpperCamelCase , UpperCamelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
UpperCamelCase , UpperCamelCase : int = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(A_ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def __UpperCamelCase( self ):
'''simple docstring'''
from elasticsearch import Elasticsearch
UpperCamelCase : Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
UpperCamelCase : List[str] = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
UpperCamelCase : Optional[Any] = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=A_ )
UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class A__ ( __snake_case ):
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
UpperCamelCase : Any = np.zeros(5 , dtype=np.floataa )
UpperCamelCase : Optional[Any] = 1
UpperCamelCase , UpperCamelCase : Optional[Any] = index.search(A_ )
self.assertRaises(A_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
UpperCamelCase : Optional[int] = np.eye(5 , dtype=np.floataa )[::-1]
UpperCamelCase , UpperCamelCase : Tuple = index.search_batch(A_ )
self.assertRaises(A_ , index.search_batch , queries[0] )
UpperCamelCase : Optional[int] = [scores[0] for scores in total_scores]
UpperCamelCase : Tuple = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
UpperCamelCase : List[str] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(A_ ):
UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : Dict = faiss.IndexFlat(5 )
UpperCamelCase : Union[str, Any] = FaissIndex(custom_index=A_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __UpperCamelCase( self ):
'''simple docstring'''
import faiss
UpperCamelCase : str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file:
index.save(tmp_file.name )
UpperCamelCase : int = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
UpperCamelCase : str = np.zeros(5 , dtype=np.floataa )
UpperCamelCase : int = 1
UpperCamelCase , UpperCamelCase : Dict = index.search(A_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def A_ ( _lowerCAmelCase ) -> Optional[int]:
import faiss
UpperCamelCase : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
UpperCamelCase : List[Any] = "index.faiss"
UpperCamelCase : List[str] = F"""mock://{index_name}"""
index.save(_lowerCAmelCase , storage_options=mockfs.storage_options )
UpperCamelCase : List[str] = FaissIndex.load(_lowerCAmelCase , storage_options=mockfs.storage_options )
UpperCamelCase : List[str] = np.zeros(5 , dtype=np.floataa )
UpperCamelCase : Optional[int] = 1
UpperCamelCase , UpperCamelCase : List[str] = index.search(_lowerCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class A__ ( __snake_case ):
def __UpperCamelCase( self ):
'''simple docstring'''
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
UpperCamelCase : List[str] = Elasticsearch()
UpperCamelCase : Union[str, Any] = {"acknowledged": True}
UpperCamelCase : Union[str, Any] = ElasticSearchIndex(es_client=A_ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
UpperCamelCase : str = "foo"
UpperCamelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
UpperCamelCase , UpperCamelCase : Tuple = index.search(A_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
UpperCamelCase : Dict = "foo"
UpperCamelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
UpperCamelCase , UpperCamelCase : str = index.search(A_ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
UpperCamelCase : Dict = ["foo", "bar", "foobar"]
UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
UpperCamelCase , UpperCamelCase : Optional[int] = index.search_batch(A_ )
UpperCamelCase : str = [scores[0] for scores in total_scores]
UpperCamelCase : Optional[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([1, 1, 1] , A_ )
# batched queries with timeout
UpperCamelCase : int = ["foo", "bar", "foobar"]
UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
UpperCamelCase , UpperCamelCase : Union[str, Any] = index.search_batch(A_ , request_timeout=30 )
UpperCamelCase : Union[str, Any] = [scores[0] for scores in total_scores]
UpperCamelCase : Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([1, 1, 1] , A_ )
| 52 | 1 |
'''simple docstring'''
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase: Optional[Any] = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class a__( A_ , unittest.TestCase ):
lowercase__ = BartphoTokenizer
lowercase__ = False
lowercase__ = True
def lowercase_ ( self : Dict ):
super().setUp()
a : Optional[int] = ["▁This", "▁is", "▁a", "▁t", "est"]
a : Optional[int] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
a : List[Any] = {"unk_token": "<unk>"}
a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'] )
with open(self.monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""" )
a : Optional[int] = BartphoTokenizer(snake_case__ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self : int , **__snake_case : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def lowercase_ ( self : str , __snake_case : Dict ):
a : Optional[Any] = "This is a là test"
a : List[Any] = "This is a<unk><unk> test"
return input_text, output_text
def lowercase_ ( self : List[Any] ):
a : List[Any] = BartphoTokenizer(snake_case__ , self.monolingual_vocab_file , **self.special_tokens_map )
a : int = "This is a là test"
a : Tuple = "▁This ▁is ▁a ▁l à ▁t est".split()
a : Union[str, Any] = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
a : Optional[int] = tokens + [tokenizer.unk_token]
a : List[Any] = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) | 360 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class a__( lowerCamelCase__ , unittest.TestCase ):
lowercase__ = CTRLTokenizer
lowercase__ = False
lowercase__ = False
def lowercase_ ( self : Dict ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a : Tuple = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
a : Union[str, Any] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
a : Union[str, Any] = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
a : Optional[Any] = {'unk_token': '<unk>'}
a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__snake_case ) )
def lowercase_ ( self : int , **__snake_case : str ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def lowercase_ ( self : Optional[int] , __snake_case : Any ):
a : int = 'adapt react readapt apt'
a : Any = 'adapt react readapt apt'
return input_text, output_text
def lowercase_ ( self : Dict ):
a : Dict = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a : List[str] = 'adapt react readapt apt'
a : Dict = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
a : Any = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
a : Dict = tokens + [tokenizer.unk_token]
a : Optional[Any] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case ) | 96 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=__A )
class A ( __A ):
UpperCamelCase_ : Dict =field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase_ : List[str] =Features({'''image''': Image()} )
UpperCamelCase_ : Optional[Any] =Features({'''labels''': ClassLabel} )
UpperCamelCase_ : List[str] ='''image'''
UpperCamelCase_ : Union[str, Any] ='''labels'''
def _A (self , lowerCAmelCase ):
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , _lowerCamelCase ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
__lowercase= copy.deepcopy(self )
__lowercase= self.label_schema.copy()
__lowercase= features[self.label_column]
__lowercase= label_schema
return task_template
@property
def _A (self ):
return {
self.image_column: "image",
self.label_column: "labels",
}
| 295 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCamelCase__ : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def UpperCAmelCase ( a_ , a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
A_ : int = state_dict.pop(a_ )
A_ : Tuple = val
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
A_ : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
A_ : Optional[int] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
A_ : str = value
else:
A_ : int = value
return new_state_dict
def UpperCAmelCase ( a_ , a_=False ) -> Optional[int]:
"""simple docstring"""
A_ : List[Any] = """"""
if is_panoptic:
A_ : Any = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
A_ : Optional[int] = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
A_ : str = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
A_ : Optional[Any] = in_proj_weight[:2_5_6, :]
A_ : Tuple = in_proj_bias[:2_5_6]
A_ : Dict = in_proj_weight[2_5_6:5_1_2, :]
A_ : int = in_proj_bias[2_5_6:5_1_2]
A_ : int = in_proj_weight[-2_5_6:, :]
A_ : Optional[int] = in_proj_bias[-2_5_6:]
def UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
A_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : List[Any] = Image.open(requests.get(a_ , stream=a_ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( a_ , a_ ) -> Dict:
"""simple docstring"""
A_ : int = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
A_ : str = """resnet101"""
if "dc5" in model_name:
A_ : List[Any] = True
A_ : str = """panoptic""" in model_name
if is_panoptic:
A_ : Dict = 2_5_0
else:
A_ : Union[str, Any] = 9_1
A_ : str = """huggingface/label-files"""
A_ : Union[str, Any] = """coco-detection-id2label.json"""
A_ : Optional[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type="""dataset""" ) , """r""" ) )
A_ : str = {int(a_ ): v for k, v in idalabel.items()}
A_ : Optional[int] = idalabel
A_ : Tuple = {v: k for k, v in idalabel.items()}
# load image processor
A_ : List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
A_ : Any = ConditionalDetrImageProcessor(format=a_ )
# prepare image
A_ : Tuple = prepare_img()
A_ : Any = image_processor(images=a_ , return_tensors="""pt""" )
A_ : Optional[int] = encoding["""pixel_values"""]
logger.info(F"Converting model {model_name}..." )
# load original model from torch hub
A_ : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , a_ , pretrained=a_ ).eval()
A_ : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
A_ : Union[str, Any] = """conditional_detr.""" + src
rename_key(a_ , a_ , a_ )
A_ : Any = rename_backbone_keys(a_ )
# query, key and value matrices need special treatment
read_in_q_k_v(a_ , is_panoptic=a_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
A_ : List[str] = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
A_ : Dict = state_dict.pop(a_ )
A_ : List[Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
A_ : str = state_dict.pop(a_ )
A_ : Any = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
A_ : Optional[int] = state_dict.pop(a_ )
A_ : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
A_ : Tuple = state_dict.pop(a_ )
A_ : Dict = val
# finally, create HuggingFace model and load state dict
A_ : Union[str, Any] = ConditionalDetrForSegmentation(a_ ) if is_panoptic else ConditionalDetrForObjectDetection(a_ )
model.load_state_dict(a_ )
model.eval()
model.push_to_hub(repo_id=a_ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
A_ : str = conditional_detr(a_ )
A_ : str = model(a_ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
image_processor.save_pretrained(a_ )
if __name__ == "__main__":
UpperCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
UpperCamelCase__ : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 344 | 0 |
def lowerCAmelCase__ ( lowerCamelCase_ : str ,lowerCamelCase_ : str):
'''simple docstring'''
def get_matched_characters(lowerCamelCase_ : str ,lowerCamelCase_ : str) -> str:
lowerCAmelCase__ : List[Any] = []
lowerCAmelCase__ : List[str] = min(len(_stra) ,len(_stra)) // 2
for i, l in enumerate(_stra):
lowerCAmelCase__ : Optional[Any] = int(max(0 ,i - limit))
lowerCAmelCase__ : List[Any] = int(min(i + limit + 1 ,len(_stra)))
if l in _stra[left:right]:
matched.append(a_)
lowerCAmelCase__ : str = f"""{_stra[0:_stra.index(a_)]} {_stra[_stra.index(a_) + 1:]}"""
return "".join(a_)
# matching characters
lowerCAmelCase__ : Optional[int] = get_matched_characters(a_ ,a_)
lowerCAmelCase__ : Dict = get_matched_characters(a_ ,a_)
lowerCAmelCase__ : List[str] = len(a_)
# transposition
lowerCAmelCase__ : List[Any] = (
len([(ca, ca) for ca, ca in zip(a_ ,a_) if ca != ca]) // 2
)
if not match_count:
lowerCAmelCase__ : List[Any] = 0.0
else:
lowerCAmelCase__ : Optional[Any] = (
1
/ 3
* (
match_count / len(a_)
+ match_count / len(a_)
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
lowerCAmelCase__ : List[Any] = 0
for ca, ca in zip(stra[:4] ,stra[:4]):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 367 |
def lowerCAmelCase__ ( lowerCamelCase_ : int = 1000):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : int = 1, 1
lowerCAmelCase__ : Any = 2
while True:
lowerCAmelCase__ : Optional[Any] = 0
lowerCAmelCase__ : Any = fa + fa
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = fa, f
index += 1
for _ in str(lowerCamelCase_):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 94 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Optional[Any] = "git_vision_model"
def __init__( self, SCREAMING_SNAKE_CASE_=768, SCREAMING_SNAKE_CASE_=3072, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=224, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_="quick_gelu", SCREAMING_SNAKE_CASE_=1e-5, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.02, **SCREAMING_SNAKE_CASE_, ) -> Optional[Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = hidden_size
UpperCamelCase : Any = intermediate_size
UpperCamelCase : Any = num_hidden_layers
UpperCamelCase : Optional[int] = num_attention_heads
UpperCamelCase : int = num_channels
UpperCamelCase : int = patch_size
UpperCamelCase : List[Any] = image_size
UpperCamelCase : List[Any] = initializer_range
UpperCamelCase : str = attention_dropout
UpperCamelCase : Dict = layer_norm_eps
UpperCamelCase : List[str] = hidden_act
@classmethod
def snake_case_ ( cls, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase : Dict = cls.get_config_dict(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('model_type' ) == "git":
UpperCamelCase : Optional[Any] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls, 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Tuple = "git"
def __init__( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=3_0522, SCREAMING_SNAKE_CASE_=768, SCREAMING_SNAKE_CASE_=6, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=3072, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=1024, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=1e-12, SCREAMING_SNAKE_CASE_=0, SCREAMING_SNAKE_CASE_="absolute", SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=101, SCREAMING_SNAKE_CASE_=102, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_, ) -> Dict:
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_, eos_token_id=SCREAMING_SNAKE_CASE_, pad_token_id=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
if vision_config is None:
UpperCamelCase : Optional[Any] = {}
logger.info('vision_config is None. initializing the GitVisionConfig with default values.' )
UpperCamelCase : Optional[Any] = GitVisionConfig(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = vocab_size
UpperCamelCase : Tuple = hidden_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : Any = num_attention_heads
UpperCamelCase : Optional[Any] = hidden_act
UpperCamelCase : Any = intermediate_size
UpperCamelCase : Optional[int] = hidden_dropout_prob
UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCamelCase : List[str] = max_position_embeddings
UpperCamelCase : Any = initializer_range
UpperCamelCase : Any = layer_norm_eps
UpperCamelCase : str = position_embedding_type
UpperCamelCase : Union[str, Any] = use_cache
UpperCamelCase : Dict = tie_word_embeddings
UpperCamelCase : Dict = num_image_with_embedding
UpperCamelCase : Union[str, Any] = bos_token_id
UpperCamelCase : Optional[int] = eos_token_id
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : Tuple = copy.deepcopy(self.__dict__ )
UpperCamelCase : Optional[Any] = self.vision_config.to_dict()
UpperCamelCase : str = self.__class__.model_type
return output
| 119 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Tuple = "bloom"
UpperCAmelCase__ : List[Any] = ["past_key_values"]
UpperCAmelCase__ : str = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self, SCREAMING_SNAKE_CASE_=25_0880, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=8, SCREAMING_SNAKE_CASE_=1e-5, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=False, **SCREAMING_SNAKE_CASE_, ) -> Tuple:
UpperCamelCase : str = vocab_size
# Backward compatibility with n_embed kwarg
UpperCamelCase : Optional[Any] = kwargs.pop('n_embed', SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = hidden_size if n_embed is None else n_embed
UpperCamelCase : Tuple = n_layer
UpperCamelCase : Dict = n_head
UpperCamelCase : List[Any] = layer_norm_epsilon
UpperCamelCase : Optional[int] = initializer_range
UpperCamelCase : int = use_cache
UpperCamelCase : int = pretraining_tp
UpperCamelCase : Optional[int] = apply_residual_connection_post_layernorm
UpperCamelCase : str = hidden_dropout
UpperCamelCase : str = attention_dropout
UpperCamelCase : List[Any] = bos_token_id
UpperCamelCase : Tuple = eos_token_id
UpperCamelCase : Union[str, Any] = slow_but_exact
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_, eos_token_id=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Any = version.parse("1.12" )
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = "default", SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False, ) -> Any:
super().__init__(SCREAMING_SNAKE_CASE_, task=SCREAMING_SNAKE_CASE_, patching_specs=SCREAMING_SNAKE_CASE_, use_past=SCREAMING_SNAKE_CASE_ )
if not getattr(self._config, 'pad_token_id', SCREAMING_SNAKE_CASE_ ):
# TODO: how to do that better?
UpperCamelCase : Tuple = 0
@property
def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]:
UpperCamelCase : str = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_, direction='inputs', inverted_values_shape=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
UpperCamelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def snake_case_ ( self ) -> int:
return self._config.n_layer
@property
def snake_case_ ( self ) -> int:
return self._config.n_head
@property
def snake_case_ ( self ) -> float:
return 1e-3
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = -1, SCREAMING_SNAKE_CASE_ = -1, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = None, ) -> Mapping[str, Any]:
UpperCamelCase : Dict = super(SCREAMING_SNAKE_CASE_, self ).generate_dummy_inputs(
SCREAMING_SNAKE_CASE_, batch_size=SCREAMING_SNAKE_CASE_, seq_length=SCREAMING_SNAKE_CASE_, is_pair=SCREAMING_SNAKE_CASE_, framework=SCREAMING_SNAKE_CASE_ )
# We need to order the input in the way they appears in the forward()
UpperCamelCase : Any = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
UpperCamelCase , UpperCamelCase : int = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
UpperCamelCase : Any = seqlen + 2
UpperCamelCase : Optional[int] = self._config.hidden_size // self.num_attention_heads
UpperCamelCase : Any = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
UpperCamelCase : Optional[Any] = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
UpperCamelCase : List[str] = [
(torch.zeros(SCREAMING_SNAKE_CASE_ ), torch.zeros(SCREAMING_SNAKE_CASE_ )) for _ in range(self.num_layers )
]
UpperCamelCase : str = common_inputs['attention_mask']
if self.use_past:
UpperCamelCase : int = ordered_inputs['attention_mask'].dtype
UpperCamelCase : List[Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, dtype=SCREAMING_SNAKE_CASE_ )], dim=1 )
return ordered_inputs
@property
def snake_case_ ( self ) -> int:
return 13
| 119 | 1 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE_: Dict =TypeVar('T')
class __A ( Generic[T] ):
a__ : deque[T] # Cache store of keys
a__ : set[T] # References of the keys in cache
a__ : int = 10 # Maximum capacity of cache
def __init__(self : List[str] , __a : int ):
UpperCAmelCase_ = deque()
UpperCAmelCase_ = set()
if not n:
UpperCAmelCase_ = sys.maxsize
elif n < 0:
raise ValueError("n should be an integer greater than 0." )
else:
UpperCAmelCase_ = n
def _lowercase (self : Optional[Any] , __a : T ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
UpperCAmelCase_ = self.dq_store.pop()
self.key_reference.remove(__a )
else:
self.dq_store.remove(__a )
self.dq_store.appendleft(__a )
self.key_reference.add(__a )
def _lowercase (self : Optional[int] ):
for k in self.dq_store:
print(__a )
def __repr__(self : List[Any] ):
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE_: LRUCache[str | int] =LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 106 | '''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : List[Any] = MobileBertTokenizer
a__ : str = MobileBertTokenizerFast
a__ : List[str] = True
a__ : Dict = True
a__ : Optional[int] = filter_non_english
a__ : int = """google/mobilebert-uncased"""
def _lowercase (self : List[str] ):
super().setUp()
UpperCAmelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCAmelCase_ = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def _lowercase (self : Tuple , __a : str ):
UpperCAmelCase_ = "UNwant\u00E9d,running"
UpperCAmelCase_ = "unwanted, running"
return input_text, output_text
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file )
UpperCAmelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(__a , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] )
def _lowercase (self : Dict ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = "UNwant\u00E9d,running"
UpperCAmelCase_ = tokenizer.tokenize(__a )
UpperCAmelCase_ = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(__a )
UpperCAmelCase_ = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# With lower casing
UpperCAmelCase_ = self.get_tokenizer(do_lower_case=__a )
UpperCAmelCase_ = self.get_rust_tokenizer(do_lower_case=__a )
UpperCAmelCase_ = "UNwant\u00E9d,running"
UpperCAmelCase_ = tokenizer.tokenize(__a )
UpperCAmelCase_ = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(__a )
UpperCAmelCase_ = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def _lowercase (self : Dict ):
UpperCAmelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowercase (self : Any ):
UpperCAmelCase_ = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = BasicTokenizer(do_lower_case=__a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def _lowercase (self : Any ):
UpperCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
UpperCAmelCase_ = {}
for i, token in enumerate(__a ):
UpperCAmelCase_ = i
UpperCAmelCase_ = WordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def _lowercase (self : Optional[int] ):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def _lowercase (self : str ):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def _lowercase (self : Any ):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def _lowercase (self : Any ):
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(__a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def _lowercase (self : Dict ):
UpperCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
UpperCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def _lowercase (self : Dict ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(__a , **__a )
UpperCAmelCase_ = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
UpperCAmelCase_ = tokenizer_r.encode_plus(
__a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , )
UpperCAmelCase_ = tokenizer_r.do_lower_case if hasattr(__a , "do_lower_case" ) else False
UpperCAmelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = ["的", "人", "有"]
UpperCAmelCase_ = "".join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ = True
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(__a , **__a )
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(__a , **__a )
UpperCAmelCase_ = tokenizer_p.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer_r.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(__a )
UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = False
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(__a , **__a )
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(__a , **__a )
UpperCAmelCase_ = tokenizer_r.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer_p.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(__a )
UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase_ = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
| 106 | 1 |
from __future__ import annotations
import os
from collections.abc import Mapping
lowerCAmelCase : Any = tuple[int, int]
class _A :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : set[int] = vertices
SCREAMING_SNAKE_CASE_ : dict[EdgeT, int] = {
(min(_SCREAMING_SNAKE_CASE ), max(_SCREAMING_SNAKE_CASE )): weight for edge, weight in edges.items()
}
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
SCREAMING_SNAKE_CASE_ : List[str] = weight
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Graph = Graph({min(self.vertices )} , {} )
SCREAMING_SNAKE_CASE_ : EdgeT
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : EdgeT
SCREAMING_SNAKE_CASE_ : int
while len(subgraph.vertices ) < len(self.vertices ):
SCREAMING_SNAKE_CASE_ : Tuple = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
SCREAMING_SNAKE_CASE_ : List[Any] = edge
SCREAMING_SNAKE_CASE_ : Dict = weight
subgraph.add_edge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return subgraph
def A_ ( a = "p107_network.txt" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = os.path.abspath(os.path.dirname(a ) )
SCREAMING_SNAKE_CASE_ : str = os.path.join(a , a )
SCREAMING_SNAKE_CASE_ : dict[EdgeT, int] = {}
SCREAMING_SNAKE_CASE_ : list[str]
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : int
with open(a ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = f.read().strip().split('\n' )
SCREAMING_SNAKE_CASE_ : Any = [line.split(',' ) for line in data]
for edgea in range(1 , len(a ) ):
for edgea in range(a ):
if adjaceny_matrix[edgea][edgea] != "-":
SCREAMING_SNAKE_CASE_ : str = int(adjaceny_matrix[edgea][edgea] )
SCREAMING_SNAKE_CASE_ : Graph = Graph(set(range(len(a ) ) ) , a )
SCREAMING_SNAKE_CASE_ : Graph = graph.prims_algorithm()
SCREAMING_SNAKE_CASE_ : int = sum(graph.edges.values() )
SCREAMING_SNAKE_CASE_ : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
| 253 |
import argparse
from collections import defaultdict
import yaml
lowerCAmelCase : Dict = 'docs/source/en/_toctree.yml'
def clean_model_doc_toc(model_doc):
    """Remove duplicate entries from the model part of the doc TOC and sort it alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.')
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]})
    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1])
    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    """Check (and optionally fix) the model section of the table of content."""
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['sections'] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
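# A minimal sketch of what clean_model_doc_toc does (hypothetical entries,
# not from the real table of contents): duplicate "local" keys with a single
# title are collapsed and the result is sorted by title, so
#
#     clean_model_doc_toc([
#         {"local": "model_doc/bert", "title": "BERT"},
#         {"local": "model_doc/bert", "title": "BERT"},
#         {"local": "model_doc/albert", "title": "ALBERT"},
#     ])
#
# returns [{"local": "model_doc/albert", "title": "ALBERT"},
#          {"local": "model_doc/bert", "title": "BERT"}].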
| 253 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Any = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = TFAutoModel.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[int] = AutoModel.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = TFAutoModelForPreTraining.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : int = AutoModelForPreTraining.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Dict = TFAutoModelForCausalLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ : List[str] = TFAutoModelForCausalLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[int] = AutoModelForCausalLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ : Dict = AutoModelForCausalLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Tuple = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = AutoModelWithLMHead.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : int = TFAutoModelForMaskedLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Tuple = AutoModelForMaskedLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ : List[Any] = AutoModelForMaskedLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str] = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_pt=_snake_case )
lowercase__ : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ,from_tf=_snake_case )
lowercase__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(
_snake_case ,output_loading_info=_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : int = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : int = AutoModelForSequenceClassification.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
lowercase__ : str = AutoConfig.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : Any = TFAutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_pt=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
lowercase__ : List[str] = AutoModelForQuestionAnswering.from_pretrained(_snake_case ,from_tf=_snake_case )
self.assertIsNotNone(_snake_case )
self.assertIsInstance(_snake_case ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
        lowercase__ : int = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER ,from_pt=True )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
        lowercase__ : Tuple = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER ,from_tf=True )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
def UpperCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
        lowercase__ : List[str] = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,from_pt=True )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
        lowercase__ : Optional[int] = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,from_tf=True )
self.assertIsInstance(_snake_case ,_snake_case )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=_snake_case ) ,14_410 )
| 360 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 302 | 0 |
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale the data to the [0, 1] range (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Convert the data to z-scores (zero mean, unit sample standard deviation)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
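# A minimal worked example (illustrative values, not from the original module):
# min-max normalization maps the extremes to 0 and 1, while standardization
# produces z-scores based on the sample standard deviation.
if __name__ == "__main__":
    print(normalization([5, 10, 15]))    # [0.0, 0.5, 1.0]
    print(standardization([5, 10, 15]))  # [-1.0, 0.0, 1.0]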
| 85 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout).")
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer.")
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).')
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory.")
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
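# A worked example (illustrative strings, not from the dataset): for
# a_gold = "the cat sat on the mat" and a_pred = "cat sat mat", normalization
# drops the articles, leaving 4 gold tokens and 3 predicted tokens with 3 in
# common, so precision = 3/3 = 1.0, recall = 3/4 = 0.75 and
# F1 = 2 * 1.0 * 0.75 / (1.0 + 0.75) = 6/7 ≈ 0.857.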
def get_raw_scores(dataset, preds):
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ])
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score")
    pr_fa = make_precision_recall_eval(
        fa_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score")
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)")
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
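# The sweep above starts from the score of predicting no-answer for every
# question (every unanswerable question counts as correct), then moves the
# threshold past one qid at a time in ascending order of no-answer
# probability: answering an answerable question adds its score, while a
# non-empty prediction on an unanswerable one costs 1. The running maximum
# determines the best threshold.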
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
    dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
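# Typical invocation, per the arguments defined in parse_args above
# (file names here are placeholders):
#   python <this script> data.json predictions.json -o eval.json \
#       -n na_probs.json -p out_images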
| 281 | 0 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class __UpperCAmelCase( UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = MaMaaaTokenizer
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = True
def UpperCAmelCase_ ( self ):
'''simple docstring'''
super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer(self, **kwargs):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= """</s>"""
lowercase__ : Optional[Any]= 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : int= self.get_tokenizer()
lowercase__ : Dict= list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[int]= self.get_tokenizer()
lowercase__ : str= tokenizer.tokenize("This is a test" )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [2, 3, 4, 5, 6] , )
lowercase__ : int= tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ["▁This", "▁is", "▁a", "▁t", "est"] )
lowercase__ : Dict= tokenizer.convert_tokens_to_string(_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , "This is a test" )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= {"""input_ids""": [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = 'facebook/m2m100_418M'
__lowerCamelCase = [
'In my opinion, there are two levels of response from the French government.',
'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
]
__lowerCamelCase = [
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
]
# fmt: off
__lowerCamelCase = [EN_CODE, 593, 1_949, 115_781, 4, 71_586, 4_234, 60_633, 126_233, 432, 123_808, 15_592, 1_197, 117_132, 120_618, 5, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr")
        cls.pad_token_id = 1
        return cls
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128006 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128022 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128076 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128063 )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= self.tokenizer.get_vocab()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= """en"""
lowercase__ : List[Any]= self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
self.assertIn(_SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
# fmt: off
lowercase__ : List[Any]= [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
lowercase__ : List[str]= self.tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int]= self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : str= tempfile.mkdtemp()
lowercase__ : Union[str, Any]= self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
lowercase__ : str= MaMaaaTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.lang_token_to_id , _SCREAMING_SNAKE_CASE )
@require_torch
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : List[Any]= """en"""
lowercase__ : Dict= """fr"""
lowercase__ : Optional[int]= self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_SCREAMING_SNAKE_CASE , return_tensors="pt" )
lowercase__ : Tuple= shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
lowercase__ : int= batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= """mr"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
lowercase__ : int= """zh"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Optional[Any]= """mr"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
lowercase__ : Union[str, Any]= """zh"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , {
# en_XX, A, test, EOS
"input_ids": [[128022, 58, 4183, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 128006,
} , )
| 360 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 128,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase_ ( cls ):
'''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
def UpperCAmelCase_ ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def UpperCAmelCase_ ( self ):
'''simple docstring'''
        config = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
lowercase__ : List[Any]= BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case__ , repo_id="test-config" , push_to_hub=snake_case__ , use_auth_token=self._token )
lowercase__ : int= BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
        config = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
lowercase__ : Optional[Any]= BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
snake_case__ , repo_id="valid_org/test-config-org" , push_to_hub=snake_case__ , use_auth_token=self._token )
lowercase__ : Any= BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(snake_case__ , getattr(snake_case__ , snake_case__ ) )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
        new_config = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=True )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
        c = GPTaConfig()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}")
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
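        # update_from_string parses a comma-separated list of key=value pairs;
        # for example (hypothetical values), c.update_from_string("n_embd=10,summary_type=bar")
        # would set c.n_embd to 10 and c.summary_type to "bar".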
def UpperCAmelCase_ ( self ):
'''simple docstring'''
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}.")
def UpperCAmelCase_ ( self ):
'''simple docstring'''
with self.assertRaises(snake_case__ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowercase__ : Optional[int]= BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
lowercase__ : Optional[Any]= BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(snake_case__ )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# A mock response for an HTTP head request to emulate server down
lowercase__ : str= mock.Mock()
lowercase__ : Optional[Any]= 500
lowercase__ : Any= {}
lowercase__ : Tuple= HTTPError
lowercase__ : List[Any]= {}
# Download this model to make sure it's in the cache.
lowercase__ : Any= BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=snake_case__ ) as mock_head:
lowercase__ : Any= BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
lowercase__ : Optional[Any]= BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 150 | 0 |
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
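# Example output on a machine without CUDA: "Successfully ran on 0 GPUs"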
| 345 |
def topological_sort(graph):
    """Kahn's algorithm: repeatedly remove vertices with indegree zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
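# For the adjacency list above this prints [0, 1, 2, 3, 4, 5]; adding a back
# edge such as 5 -> 0 would create a cycle and make it print "Cycle exists".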
| 345 | 1 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
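# Note the shared expectation: get_imports reports only the unconditional
# top-level dependency ("os") for every case above; modules imported inside
# try/except blocks (such as "bar" and "baz") are treated as optional and
# are excluded from the result.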
| 350 |
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
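# This is Project Euler problem 25: solution(1000) returns 4782, the index of
# the first Fibonacci number to contain 1000 digits.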
| 140 | 0 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float):
    """Approximate the solution of y' = ode_func(x, y) with y(x0) = y0 on [x0, x_end]."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
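# A minimal usage sketch (assumed example, not from the original module):
# for y' = y with y(0) = 1 integrated up to x = 1, the final value
# approximates e ≈ 2.718 as the step size shrinks.
#
#     y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#     print(y[-1])  # about 2.705 with this step size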
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 96 |
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7

    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
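# The candidates 7, 19, 37, 61, ... are the differences of consecutive cubes,
# (n + 1)**3 - n**3 = 3*n*n + 3*n + 1, which is why each step adds 6 * cube_index.
# Counting the prime ones below 10**6 yields 173 (cf. Project Euler 131).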
| 180 | 0 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
"""facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/esm2_t6_8M_UR50D""": 1024,
"""facebook/esm2_t12_35M_UR50D""": 1024,
}
def load_vocab_file(vocab_file):
    """Read the vocabulary file into a list of stripped token strings."""
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>",
                 mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token):
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index):
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token
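    # For a single sequence this produces [<cls>] + ids + [<eos>]; with a vocab
    # file in which <cls> is id 0 and <eos> is id 2 (an assumption about the
    # checkpoint, not verified here),
    # build_inputs_with_special_tokens([5, 6, 7]) returns [0, 5, 6, 7, 2].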
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix=None):
        vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
        with open(vocab_file, 'w') as f:
            f.write('\n'.join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False):
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
| 364 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=99 , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=9 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=8 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.002 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=0 , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = encoder_seq_length
snake_case_ = decoder_seq_length
# For common tests
snake_case_ = self.decoder_seq_length
snake_case_ = is_training
snake_case_ = use_attention_mask
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = d_ff
snake_case_ = relative_attention_num_buckets
snake_case_ = dropout_rate
snake_case_ = initializer_factor
snake_case_ = eos_token_id
snake_case_ = pad_token_id
snake_case_ = decoder_start_token_id
snake_case_ = None
snake_case_ = decoder_layers
def UpperCamelCase__ ( self ):
return TaConfig.from_pretrained('''google/umt5-base''' )
    def prepare_inputs_dict( self , config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=torch_device )
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=torch_device )
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=torch_device )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config , input_ids , decoder_input_ids )
        return config, input_dict
    def prepare_config_and_inputs_for_common( self ):
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config( self ):
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        model = UMTaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids=input_ids , decoder_input_ids=decoder_input_ids , attention_mask=attention_mask , decoder_attention_mask=decoder_attention_mask , )
        result = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids )
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )
    def create_and_check_decoder_model_past( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ):
        model = UMTaModel(config=config ).get_decoder().to(torch_device ).eval()
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )['''last_hidden_state''']
        output_from_past = model(next_tokens , past_key_values=past_key_values )['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
    def create_and_check_model_fpaa_forward( self , config , input_dict , ):
        model = UMTaModel(config=config ).to(torch_device ).half().eval()
        output = model(**input_dict )['''last_hidden_state''']
        self.parent.assertFalse(torch.isnan(output ).any().item() )
@require_torch
class UMTaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp( self ):
        self.model_tester = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
    def test_export_to_onnx( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0] ).to(torch_device )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=True , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def test_model_fpaa_forward( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs )
    def test_generate_with_head_masking( self ):
        attention_names = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config ).eval()
        model.to(torch_device )
        head_masking = {
            '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=torch_device ),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=torch_device ),
        }
        for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['''decoder_head_mask'''] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=torch_device )
            out = model.generate(
                config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=True , return_dict_in_generate=True , **head_masks , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
    def test_disk_offload( self ):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def test_small_integration_test( self ):
        model = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=False , legacy=False )
        input_text = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
        input_ids = tokenizer(input_text , return_tensors='''pt''' , padding=True ).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
        generated_ids = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids )
        self.assertEqual(filling , EXPECTED_FILLING ) | 267 | 0 |
from __future__ import annotations
import math
def minimax ( depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""" )
    if len(scores ) == 0:
        raise ValueError("""Scores cannot be empty""" )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def main ( ) -> None:
    """simple docstring"""
    scores = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3]
    height = math.log(len(scores ) , 2 )
    print("""Optimal value : """ , end="""""" )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 274 |
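As a quick sanity check on minimax above: with the eight leaf scores from main(), the tree has height log2(8) = 3 and the maximizer moves first, so the value can be traced level by level. A short hand-run, assuming the minimax function above is in scope:

import math

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
height = math.log(len(scores), 2)  # 3.0 for 8 leaves

# depth 2 (MAX): max(90,23)=90, max(6,33)=33, max(21,65)=65, max(123,34423)=34423
# depth 1 (MIN): min(90,33)=33,  min(65,34423)=65
# depth 0 (MAX): max(33,65)=65
print(minimax(0, 0, True, scores, height))  # -> 65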
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation ( cells :list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(__a ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(__a ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(__a ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
def generate_images ( cells :list[list[int]] , frames :int ) -> list[Image.Image]:
    """simple docstring"""
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new("""RGB""" , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 2_5_5 - cells[y][x] * 2_5_5
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 1_6)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 274 | 1 |
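The blinker defined above is the simplest oscillator and makes a handy unit test for new_generation: a vertical bar of three live cells flips to a horizontal bar and back every step. A minimal period-2 check, assuming the functions and constants above are in scope:

horizontal = [[0, 0, 0], [1, 1, 1], [0, 0, 0]]

# the blinker oscillates with period 2
assert new_generation(BLINKER) == horizontal
assert new_generation(horizontal) == BLINKER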
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys ( config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v ( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_ ( state_dict ):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key ( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img ( ):
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint ( model_name , pytorch_dump_folder_path , base_model=True ):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 10_00
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 3_84
        config.intermediate_size = 15_36
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main" , model_name )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if base_model:
        model = ViTModel(config , add_pooling_layer=False ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
    else:
        logits = original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 364 |
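The read_in_q_k_v step is ultimately just slicing one fused projection matrix into three equal blocks. A toy demonstration of the same split, using a made-up hidden size rather than a real checkpoint:

import torch

hidden_size = 4  # hypothetical; ViT-B uses 768
# timm stores attention as one fused qkv projection of shape (3*H, H)
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)

query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : hidden_size * 2, :]
value = in_proj_weight[-hidden_size:, :]

# the three slices tile the fused matrix exactly
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)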
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class __lowercase ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = True , **kwargs , ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size , default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size , default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs)
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs)
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs)
    def preprocess( self , images : ImageInput , do_resize : Optional[bool] = None , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , do_convert_rgb : bool = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=True)
        images = make_list_of_images(images)
        if not valid_images(images):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# PIL RGBA images are converted to RGB
if do_convert_rgb:
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        encoded_outputs = BatchFeature(data={"pixel_values": images} , tensor_type=return_tensors)
        return encoded_outputs
| 127 | 0 |
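Numerically, the preprocess pipeline above reduces to rescale, normalize, and channel reordering (plus an optional resize, omitted here). A numpy-only sketch of those steps on a random image; the mean/std constants are the OpenAI CLIP values the processor defaults to, quoted from memory rather than the library:

import numpy as np

image = np.random.randint(0, 256, size=(8, 8, 3), dtype=np.uint8)  # toy HWC image

rescaled = image.astype(np.float32) * (1 / 255)            # do_rescale
mean = np.array([0.48145466, 0.4578275, 0.40821073])       # OPENAI_CLIP_MEAN (assumed)
std = np.array([0.26862954, 0.26130258, 0.27577711])       # OPENAI_CLIP_STD (assumed)
normalized = (rescaled - mean) / std                       # do_normalize
pixel_values = normalized.transpose(2, 0, 1)               # ChannelDimension.FIRST

print(pixel_values.shape)  # (3, 8, 8)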
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot_small''': [
'''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotSmallConfig''',
'''BlenderbotSmallOnnxConfig''',
],
'''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_blenderbot_small_fast'''] = ['''BlenderbotSmallTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blenderbot_small'''] = [
'''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotSmallForCausalLM''',
'''BlenderbotSmallForConditionalGeneration''',
'''BlenderbotSmallModel''',
'''BlenderbotSmallPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blenderbot_small'''] = [
'''TFBlenderbotSmallForConditionalGeneration''',
'''TFBlenderbotSmallModel''',
'''TFBlenderbotSmallPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_blenderbot_small'''] = [
'''FlaxBlenderbotSmallForConditionalGeneration''',
'''FlaxBlenderbotSmallModel''',
'''FlaxBlenderbotSmallPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 |
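The structure above defers the heavy framework imports until an attribute is first touched. A minimal sketch of the same idea with module-level __getattr__ (PEP 562), independent of the _LazyModule helper; module and attribute names here are hypothetical:

# mypackage/__init__.py -- minimal lazy-import layout (hypothetical names)
import importlib

_import_structure = {
    "configuration": ["MyConfig"],
    "modeling": ["MyModel"],
}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # resolve the owning submodule only on first access
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)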
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCAmelCase__ ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """BlipImageProcessor"""
    tokenizer_class = """AutoTokenizer"""
    def __init__( self , image_processor , tokenizer , qformer_tokenizer ):
        super().__init__(image_processor , tokenizer )
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.' )
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding['qformer_input_ids'] = qformer_text_encoding.pop('input_ids' )
            encoding['qformer_attention_mask'] = qformer_text_encoding.pop('attention_mask' )
        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors )
            encoding.update(image_encoding )
        return encoding
return encoding
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    def save_pretrained( self , save_directory , **kwargs ):
        if os.path.isfile(save_directory ):
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , 'qformer_tokenizer' )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder='qformer_tokenizer' )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args ) | 96 | 0 |
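The key move in __call__ above is running the same text through two tokenizers and storing the second encoding under prefixed keys so both coexist in one BatchFeature. A plain-dict sketch of that merge:

def merge_encodings(text_encoding, qformer_encoding):
    merged = dict(text_encoding)
    # Q-Former ids/mask live under prefixed keys next to the LM encoding
    merged["qformer_input_ids"] = qformer_encoding["input_ids"]
    merged["qformer_attention_mask"] = qformer_encoding["attention_mask"]
    return merged

enc = merge_encodings(
    {"input_ids": [[1, 2, 3]], "attention_mask": [[1, 1, 1]]},
    {"input_ids": [[4, 5]], "attention_mask": [[1, 1]]},
)
print(sorted(enc))
# ['attention_mask', 'input_ids', 'qformer_attention_mask', 'qformer_input_ids']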
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput ( BaseOutput ):
    sample: torch.FloatTensor
class UNetaDModel ( ModelMixin , ConfigMixin ):
@register_to_config
    def __init__( self , sample_size = 6_55_36 , sample_rate = None , in_channels = 2 , out_channels = 2 , extra_in_channels = 0 , time_embedding_type = "fourier" , flip_sin_to_cos = True , use_timestep_embedding = False , freq_shift = 0.0 , down_block_types = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , mid_block_type = "UNetMidBlock1D" , out_block_type = None , block_out_channels = (32, 32, 64) , act_fn = None , norm_num_groups = 8 , layers_per_block = 1 , downsample_each_block = False , ):
        '''simple docstring'''
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8 , set_W_to_weight=False , log=False , flip_sin_to_cos=flip_sin_to_cos)
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0] , flip_sin_to_cos=flip_sin_to_cos , downscale_freq_shift=freq_shift)
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim , time_embed_dim=time_embed_dim , act_fn=act_fn , out_dim=block_out_channels[0] , )
        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type , num_layers=layers_per_block , in_channels=input_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(down_block)
        # mid
        self.mid_block = get_mid_block(
            mid_block_type , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=layers_per_block , add_downsample=downsample_each_block , )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(up_block_types) - 1
            up_block = get_up_block(
                up_block_type , num_layers=layers_per_block , in_channels=prev_output_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type , num_groups_out=num_groups_out , embed_dim=block_out_channels[0] , out_channels=out_channels , act_fn=act_fn , fc_dim=block_out_channels[-1] // 4 , )
    def forward( self , sample , timestep , return_dict = True , ) -> Union[UNetaDOutput, Tuple]:
        '''simple docstring'''
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)
        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample , res_samples = downsample_block(hidden_states=sample , temb=timestep_embed)
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample , timestep_embed)
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample , res_hidden_states_tuple=res_samples , temb=timestep_embed)
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample , timestep_embed)
        if not return_dict:
            return (sample,)
        return UNetaDOutput(sample=sample)
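forward() above first coerces whatever it is given as a timestep (a Python int, a 0-d tensor, or a 1-d tensor) into a batch-friendly 1-d tensor on the right device. That coercion in isolation, assuming only torch:

import torch

def normalize_timesteps(timestep, sample):
    # mirror the coercion at the top of forward()
    if not torch.is_tensor(timestep):
        timestep = torch.tensor([timestep], dtype=torch.long, device=sample.device)
    elif timestep.ndim == 0:
        timestep = timestep[None].to(sample.device)
    return timestep

sample = torch.zeros(2, 3, 16)
print(normalize_timesteps(5, sample))                # tensor([5])
print(normalize_timesteps(torch.tensor(7), sample))  # tensor([7])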
| 203 | """simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests ( unittest.TestCase ):
@property
    def dummy_uncond_unet( self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model
@property
    def dummy_vq_model( self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
        return model
@property
    def dummy_text_encoder( self):
        '''simple docstring'''
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModel(config)
    def test_inference_uncond( self):
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet , vqvae=vae , scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator , num_inference_steps=2 , output_type='numpy').images
        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator , num_inference_steps=2 , output_type='numpy' , return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests ( unittest.TestCase ):
    def test_inference_uncond( self):
        '''simple docstring'''
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator , num_inference_steps=5 , output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_56, 2_56, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 203 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xmod'] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 15 |
def calc_profit ( profit : list , weight : list , max_weight : int ) -> float:
    """simple docstring"""
    if len(profit ) != len(weight ):
        raise ValueError('''The length of profit and weight must be same.''' )
    if max_weight <= 0:
        raise ValueError('''max_weight must greater than zero.''' )
    if any(p < 0 for p in profit ):
        raise ValueError('''Profit can not be negative.''' )
    if any(w < 0 for w in weight ):
        raise ValueError('''Weight can not be negative.''' )
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit , weight )]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight )
    # declaring useful variables
    length = len(sorted_profit_by_weight )
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight )
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
    profit = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
    weight = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
    max_weight = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 94 | 0 |
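A worked call makes the greedy rule concrete: items are consumed in decreasing profit-per-weight order, and the last one may be taken fractionally. The classic instance below, assuming calc_profit is in scope:

profit = [60, 100, 120]
weight = [10, 20, 30]
# ratios are 6.0, 5.0, 4.0 -> take items 0 and 1 whole (30 kg, profit 160),
# then 20 of item 2's 30 kg for 120 * 20/30 = 80, giving 240.0 in total
print(calc_profit(profit, weight, max_weight=50))  # -> 240.0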
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big ( n , prec=1000 ) -> bool:
    '''simple docstring'''
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i))) | 34 |
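is_prime_big relies on the bin_exp_mod helper imported above. Assuming it follows a (base, exponent, modulus) contract, Python's three-argument pow is a drop-in stand-in, which lets the test be exercised without the sibling module:

def bin_exp_mod(a, n, b):
    # stand-in for the imported helper, assuming the a**n % b contract
    return pow(a, n, b)

# spot-check: Miller-Rabin never rejects a true prime, and 561 (a Carmichael
# number) is still caught as composite by the strong test
assert all(is_prime_big(p) for p in (2, 3, 5, 97, 7919))
assert not any(is_prime_big(c) for c in (1, 4, 9, 561, 100000))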
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w ( h , w , scale_factor=8 ):
    '''simple docstring'''
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
class KandinskyPipeline ( DiffusionPipeline ):
    def __init__( self , text_encoder , tokenizer , unet , scheduler , movq , ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt( self , prompt , device , num_images_per_prompt , do_classifier_free_guidance , negative_prompt=None , ):
        batch_size = len(prompt ) if isinstance(prompt , list ) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding="""max_length""" , truncation=True , max_length=77 , return_attention_mask=True , add_special_tokens=True , return_tensors="""pt""" , )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt , padding="""longest""" , return_tensors="""pt""" ).input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids , untruncated_ids ):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
        text_input_ids = text_input_ids.to(device )
        text_mask = text_inputs.attention_mask.to(device )
        prompt_embeds , text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids , attention_mask=text_mask )
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt , dim=0 )
        text_mask = text_mask.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""""""] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    f'''`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !='''
                    f''' {type(prompt )}.''' )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    f'''`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:'''
                    f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    """ the batch size of `prompt`.""" )
            else:
                uncond_tokens = negative_prompt
            uncond_input = self.tokenizer(
                uncond_tokens , padding="""max_length""" , max_length=77 , truncation=True , return_attention_mask=True , add_special_tokens=True , return_tensors="""pt""" , )
            uncond_text_input_ids = uncond_input.input_ids.to(device )
            uncond_text_mask = uncond_input.attention_mask.to(device )
            negative_prompt_embeds , uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids , attention_mask=uncond_text_mask )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1 , num_images_per_prompt )
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt , seq_len )
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1 , num_images_per_prompt , 1 )
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt , seq_len , -1 )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt , dim=0 )
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds] )
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
            text_mask = torch.cat([uncond_text_mask, text_mask] )
        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        device = torch.device(f'''cuda:{gpu_id}''' )
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        device = torch.device(f'''cuda:{gpu_id}''' )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=True )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        if self.safety_checker is not None:
            _ , hook = cpu_offload_with_hook(self.safety_checker , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase_ ( self ) -> List[Any]:
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase )
def __call__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = 512 , lowerCamelCase = 512 , lowerCamelCase = 100 , lowerCamelCase = 4.0 , lowerCamelCase = 1 , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = "pil" , lowerCamelCase = True , ) -> Union[str, Any]:
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case_ = 1
elif isinstance(lowerCamelCase , lowerCamelCase ):
snake_case_ = len(lowerCamelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}''' )
snake_case_ = self._execution_device
snake_case_ = batch_size * num_images_per_prompt
snake_case_ = guidance_scale > 1.0
snake_case_ , snake_case_ , snake_case_ = self._encode_prompt(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case_ = torch.cat(lowerCamelCase , dim=0 )
if isinstance(lowerCamelCase , lowerCamelCase ):
snake_case_ = torch.cat(lowerCamelCase , dim=0 )
if do_classifier_free_guidance:
snake_case_ = image_embeds.repeat_interleave(lowerCamelCase , dim=0 )
snake_case_ = negative_image_embeds.repeat_interleave(lowerCamelCase , dim=0 )
snake_case_ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=lowerCamelCase )
self.scheduler.set_timesteps(lowerCamelCase , device=lowerCamelCase )
snake_case_ = self.scheduler.timesteps
snake_case_ = self.unet.config.in_channels
snake_case_ , snake_case_ = get_new_h_w(lowerCamelCase , lowerCamelCase , self.movq_scale_factor )
# create initial latent
snake_case_ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , lowerCamelCase , lowerCamelCase , lowerCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
snake_case_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case_ = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds}
snake_case_ = self.unet(
sample=lowerCamelCase , timestep=lowerCamelCase , encoder_hidden_states=lowerCamelCase , added_cond_kwargs=lowerCamelCase , return_dict=lowerCamelCase , )[0]
if do_classifier_free_guidance:
snake_case_ , snake_case_ = noise_pred.split(latents.shape[1] , dim=1 )
snake_case_ , snake_case_ = noise_pred.chunk(2 )
snake_case_ , snake_case_ = variance_pred.chunk(2 )
snake_case_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
snake_case_ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
snake_case_ , snake_case_ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
snake_case_ = self.scheduler.step(
lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase , ).prev_sample
# post-processing
snake_case_ = self.movq.decode(lowerCamelCase , force_not_quantize=lowerCamelCase )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}''' )
if output_type in ["np", "pil"]:
snake_case_ = image * 0.5 + 0.5
snake_case_ = image.clamp(0 , 1 )
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case_ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,)
        return ImagePipelineOutput(images=lowerCamelCase )
| 34 | 1 |
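The pipeline above folds classifier-free guidance into one forward pass by stacking the unconditional and conditional branches along the batch axis. A minimal sketch of just that guidance step, assuming plain tensors and the guidance scale default of 4.0 from the signature above (an illustration, not the pipeline's actual code):

import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # noise_pred stacks the unconditional and conditional predictions
    # along dim 0, matching the torch.cat([uncond, cond]) pattern above
    noise_uncond, noise_text = noise_pred.chunk(2)
    # move the prediction away from the unconditional direction
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)

latents = torch.randn(1, 4, 64, 64)
model_input = torch.cat([latents] * 2)           # one forward pass serves both branches
fake_noise_pred = torch.randn_like(model_input)  # stand-in for a UNet output
guided = apply_cfg(fake_noise_pred, guidance_scale=4.0)
assert guided.shape == latents.shape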
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 106 |
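As a quick sanity check of the repaired helper above (assuming it is in scope), here is Brian Kernighan's loop-based variant of the same popcount idea for comparison; the loop function's name is ours, not from the source:

def count_bits_kernighan(number: int) -> int:
    count = 0
    while number:
        number &= number - 1  # clear the lowest set bit
        count += 1
    return count

assert get_set_bits_count(25) == count_bits_kernighan(25) == 3  # 0b11001
assert get_set_bits_count(0) == count_bits_kernighan(0) == 0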
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = ["pixel_values"]
def __init__( self : str ,lowercase_ : bool = True ,lowercase_ : Dict[str, int] = None ,lowercase_ : PILImageResampling = PILImageResampling.BICUBIC ,lowercase_ : bool = True ,lowercase_ : Dict[str, int] = None ,lowercase_ : bool = True ,lowercase_ : Union[int, float] = 1 / 2_5_5 ,lowercase_ : bool = True ,lowercase_ : Optional[Union[float, List[float]]] = None ,lowercase_ : Optional[Union[float, List[float]]] = None ,lowercase_ : bool = True ,**lowercase_ : Optional[Any] ,):
super().__init__(**lowercase_ )
lowerCAmelCase__ : List[str] = size if size is not None else {'''shortest_edge''': 2_2_4}
lowerCAmelCase__ : Tuple = get_size_dict(lowercase_ ,default_to_square=lowercase_ )
lowerCAmelCase__ : Optional[int] = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
lowerCAmelCase__ : Optional[Any] = get_size_dict(lowercase_ ,default_to_square=lowercase_ ,param_name='''crop_size''' )
lowerCAmelCase__ : Dict = do_resize
lowerCAmelCase__ : Optional[int] = size
lowerCAmelCase__ : Dict = resample
lowerCAmelCase__ : Optional[Any] = do_center_crop
lowerCAmelCase__ : Dict = crop_size
lowerCAmelCase__ : Tuple = do_rescale
lowerCAmelCase__ : str = rescale_factor
lowerCAmelCase__ : List[str] = do_normalize
lowerCAmelCase__ : Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCAmelCase__ : Optional[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCAmelCase__ : Optional[Any] = do_convert_rgb
def __lowerCAmelCase ( self : Dict ,lowercase_ : np.ndarray ,lowercase_ : Dict[str, int] ,lowercase_ : PILImageResampling = PILImageResampling.BICUBIC ,lowercase_ : Optional[Union[str, ChannelDimension]] = None ,**lowercase_ : Optional[Any] ,):
lowerCAmelCase__ : Dict = get_size_dict(lowercase_ ,default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
lowerCAmelCase__ : Optional[Any] = get_resize_output_image_size(lowercase_ ,size=size['''shortest_edge'''] ,default_to_square=lowercase_ )
return resize(lowercase_ ,size=lowercase_ ,resample=lowercase_ ,data_format=lowercase_ ,**lowercase_ )
def __lowerCAmelCase ( self : Tuple ,lowercase_ : np.ndarray ,lowercase_ : Dict[str, int] ,lowercase_ : Optional[Union[str, ChannelDimension]] = None ,**lowercase_ : str ,):
lowerCAmelCase__ : List[str] = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowercase_ ,size=(size['''height'''], size['''width''']) ,data_format=lowercase_ ,**lowercase_ )
def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : np.ndarray ,lowercase_ : Union[int, float] ,lowercase_ : Optional[Union[str, ChannelDimension]] = None ,**lowercase_ : Union[str, Any] ,):
return rescale(lowercase_ ,scale=lowercase_ ,data_format=lowercase_ ,**lowercase_ )
def __lowerCAmelCase ( self : Dict ,lowercase_ : np.ndarray ,lowercase_ : Union[float, List[float]] ,lowercase_ : Union[float, List[float]] ,lowercase_ : Optional[Union[str, ChannelDimension]] = None ,**lowercase_ : int ,):
return normalize(lowercase_ ,mean=lowercase_ ,std=lowercase_ ,data_format=lowercase_ ,**lowercase_ )
def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : ImageInput ,lowercase_ : bool = None ,lowercase_ : Dict[str, int] = None ,lowercase_ : PILImageResampling = None ,lowercase_ : bool = None ,lowercase_ : int = None ,lowercase_ : bool = None ,lowercase_ : float = None ,lowercase_ : bool = None ,lowercase_ : Optional[Union[float, List[float]]] = None ,lowercase_ : Optional[Union[float, List[float]]] = None ,lowercase_ : bool = None ,lowercase_ : Optional[Union[str, TensorType]] = None ,lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST ,**lowercase_ : List[Any] ,):
lowerCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ : Optional[int] = size if size is not None else self.size
lowerCAmelCase__ : Union[str, Any] = get_size_dict(lowercase_ ,param_name='''size''' ,default_to_square=lowercase_ )
lowerCAmelCase__ : Union[str, Any] = resample if resample is not None else self.resample
lowerCAmelCase__ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ : Dict = get_size_dict(lowercase_ ,param_name='''crop_size''' ,default_to_square=lowercase_ )
lowerCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ : str = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ : str = image_std if image_std is not None else self.image_std
lowerCAmelCase__ : int = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase__ : Any = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase__ : Tuple = [convert_to_rgb(lowercase_ ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase__ : Optional[int] = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
lowerCAmelCase__ : Optional[int] = [self.resize(image=lowercase_ ,size=lowercase_ ,resample=lowercase_ ) for image in images]
if do_center_crop:
lowerCAmelCase__ : Tuple = [self.center_crop(image=lowercase_ ,size=lowercase_ ) for image in images]
if do_rescale:
lowerCAmelCase__ : Tuple = [self.rescale(image=lowercase_ ,scale=lowercase_ ) for image in images]
if do_normalize:
lowerCAmelCase__ : Union[str, Any] = [self.normalize(image=lowercase_ ,mean=lowercase_ ,std=lowercase_ ) for image in images]
lowerCAmelCase__ : Optional[Any] = [to_channel_dimension_format(lowercase_ ,lowercase_ ) for image in images]
lowerCAmelCase__ : List[Any] = {'''pixel_values''': images}
return BatchFeature(data=lowercase_ ,tensor_type=lowercase_ )
| 106 | 1 |
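A minimal numpy sketch of the rescale, normalize, and channel-reordering stages the processor above applies, using the well-known OpenAI CLIP statistics; the random array stands in for a decoded image:

import numpy as np

OPENAI_CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
OPENAI_CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])

image = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)
image = image * (1 / 255)                              # rescale to [0, 1]
image = (image - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD   # per-channel normalize
image = image.transpose(2, 0, 1)                       # HWC -> CHW (ChannelDimension.FIRST)
print(image.shape)  # (3, 224, 224)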
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : List[str] = (KDPMaDiscreteScheduler,)
__UpperCAmelCase : List[str] = 1_0
def _lowercase ( self : Optional[Any], **UpperCAmelCase__ : int ):
__lowercase = {
"num_train_timesteps": 1_1_0_0,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a__ )
return config
def _lowercase ( self : Optional[Any] ):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=a__ )
def _lowercase ( self : Tuple ):
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001], [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=a__, beta_end=a__ )
def _lowercase ( self : List[Any] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a__ )
def _lowercase ( self : List[str] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a__ )
def _lowercase ( self : Tuple ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(prediction_type="v_prediction" )
__lowercase = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowercase = sample.to(a__ )
for i, t in enumerate(scheduler.timesteps ):
__lowercase = scheduler.scale_model_input(a__, a__ )
__lowercase = model(a__, a__ )
__lowercase = scheduler.step(a__, a__, a__ )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(a__ ) )
__lowercase = torch.mean(torch.abs(a__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1_112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_428_650_170_972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0_002 ) < 1E-3
def _lowercase ( self : Dict ):
if torch_device == "mps":
return
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowercase = sample.to(a__ )
for i, t in enumerate(scheduler.timesteps ):
__lowercase = scheduler.scale_model_input(a__, a__ )
__lowercase = model(a__, a__ )
__lowercase = scheduler.step(a__, a__, a__ )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(a__ ) )
__lowercase = torch.mean(torch.abs(a__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
def _lowercase ( self : Tuple ):
if torch_device == "mps":
return
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps, device=a__ )
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter.to(a__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__lowercase = scheduler.scale_model_input(a__, a__ )
__lowercase = model(a__, a__ )
__lowercase = scheduler.step(a__, a__, a__ )
__lowercase = output.prev_sample
__lowercase = torch.sum(torch.abs(a__ ) )
__lowercase = torch.mean(torch.abs(a__ ) )
if str(a__ ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
| 364 |
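Every test above follows the same sampling skeleton: scale the model input, run the model, step the scheduler. Below is a library-free sketch of that control flow with a plain Euler update and a stand-in denoiser; none of it is KDPM2's actual math, it only mirrors the loop shape:

import torch

def fake_model(sample: torch.Tensor, sigma: torch.Tensor) -> torch.Tensor:
    return torch.zeros_like(sample)  # stand-in denoiser predicting zero noise

sigmas = torch.linspace(1.0, 0.0, steps=11)      # assumed schedule, not KDPM2's
sample = torch.randn(1, 3, 8, 8) * sigmas[0]     # analogue of init_noise_sigma
for i in range(len(sigmas) - 1):
    sigma = sigmas[i]
    scaled = sample / ((sigma**2 + 1) ** 0.5)    # analogue of scale_model_input
    noise_pred = fake_model(scaled, sigma)
    denoised = sample - sigma * noise_pred       # epsilon parameterization
    derivative = (sample - denoised) / sigma
    sample = sample + derivative * (sigmas[i + 1] - sigma)  # Euler step
print(float(sample.abs().mean()))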
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_a = logging.get_logger(__name__)
_a = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_a = [
'small',
'small-base',
'medium',
'medium-base',
'intermediate',
'intermediate-base',
'large',
'large-base',
'xlarge',
'xlarge-base',
]
_a = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
_a = {F"funnel-transformer/{name}": 5_12 for name in _model_names}
_a = {F"funnel-transformer/{name}": {'do_lower_case': True} for name in _model_names}
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : Optional[Any] = FunnelTokenizer
__UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : int = 2
def __init__( self : Dict, UpperCAmelCase__ : Dict=None, UpperCAmelCase__ : Optional[int]=None, UpperCAmelCase__ : Dict=True, UpperCAmelCase__ : List[str]="<unk>", UpperCAmelCase__ : Optional[Any]="<sep>", UpperCAmelCase__ : Optional[Any]="<pad>", UpperCAmelCase__ : Union[str, Any]="<cls>", UpperCAmelCase__ : str="<mask>", UpperCAmelCase__ : Optional[Any]="<s>", UpperCAmelCase__ : Tuple="</s>", UpperCAmelCase__ : Union[str, Any]=True, UpperCAmelCase__ : Optional[int]=True, UpperCAmelCase__ : Optional[Any]=None, UpperCAmelCase__ : Dict="##", **UpperCAmelCase__ : List[str], ):
super().__init__(
UpperCAmelCase__, tokenizer_file=UpperCAmelCase__, do_lower_case=UpperCAmelCase__, unk_token=UpperCAmelCase__, sep_token=UpperCAmelCase__, pad_token=UpperCAmelCase__, cls_token=UpperCAmelCase__, mask_token=UpperCAmelCase__, bos_token=UpperCAmelCase__, eos_token=UpperCAmelCase__, clean_text=UpperCAmelCase__, tokenize_chinese_chars=UpperCAmelCase__, strip_accents=UpperCAmelCase__, wordpieces_prefix=UpperCAmelCase__, **UpperCAmelCase__, )
__lowercase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase", UpperCAmelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents", UpperCAmelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars", UpperCAmelCase__ ) != tokenize_chinese_chars
):
__lowercase = getattr(UpperCAmelCase__, normalizer_state.pop("type" ) )
__lowercase = do_lower_case
__lowercase = strip_accents
__lowercase = tokenize_chinese_chars
__lowercase = normalizer_class(**UpperCAmelCase__ )
__lowercase = do_lower_case
def _lowercase ( self : Tuple, UpperCAmelCase__ : Any, UpperCAmelCase__ : Any=None ):
__lowercase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self : str, UpperCAmelCase__ : List[int], UpperCAmelCase__ : Optional[List[int]] = None ):
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : str, UpperCAmelCase__ : Optional[str] = None ):
__lowercase = self._tokenizer.model.save(UpperCAmelCase__, name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
| 144 | 0 |
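The token-type-id builder above is Funnel-specific: the [CLS] slot receives `cls_token_type_id` (2) rather than the usual 0. A pure-Python sketch of the resulting layout, with arbitrary segment lengths:

from typing import Optional

def funnel_token_type_ids(len_a: int, len_b: Optional[int] = None, cls_type_id: int = 2) -> list[int]:
    # [CLS] -> cls_type_id, first segment + [SEP] -> 0, second segment + [SEP] -> 1
    ids = [cls_type_id] + [0] * (len_a + 1)
    if len_b is not None:
        ids += [1] * (len_b + 1)
    return ids

print(funnel_token_type_ids(3, 2))  # [2, 0, 0, 0, 0, 1, 1, 1]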
"""simple docstring"""
from typing import List
import numpy as np
def lowercase__ ( snake_case_ :dict ):
__UpperCAmelCase = {key: len(snake_case_ ) for key, value in gen_kwargs.items() if isinstance(snake_case_ , snake_case_ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'''Sharding is ambiguous for this dataset: '''
            + '''we found several data source lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
+ '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
+ '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
) )
__UpperCAmelCase = max(lists_lengths.values() , default=0 )
return max(1 , snake_case_ )
def lowercase__ ( snake_case_ :int , snake_case_ :int ):
__UpperCAmelCase = []
for group_idx in range(snake_case_ ):
__UpperCAmelCase = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
__UpperCAmelCase = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
__UpperCAmelCase = range(snake_case_ , start + num_shards_to_add )
shards_indices_per_group.append(snake_case_ )
return shards_indices_per_group
def lowercase__ ( snake_case_ :dict , snake_case_ :int ):
__UpperCAmelCase = _number_of_shards_in_gen_kwargs(snake_case_ )
if num_shards == 1:
return [dict(snake_case_ )]
else:
__UpperCAmelCase = _distribute_shards(num_shards=snake_case_ , max_num_jobs=snake_case_ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(snake_case_ , snake_case_ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(snake_case_ ) )
]
def lowercase__ ( snake_case_ :List[dict] ):
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , snake_case_ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def lowercase__ ( snake_case_ :np.random.Generator , snake_case_ :dict ):
__UpperCAmelCase = {len(snake_case_ ) for value in gen_kwargs.values() if isinstance(snake_case_ , snake_case_ )}
__UpperCAmelCase = {}
for size in list_sizes:
__UpperCAmelCase = list(range(snake_case_ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
__UpperCAmelCase = dict(snake_case_ )
for key, value in shuffled_kwargs.items():
if isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = [value[i] for i in indices_per_size[len(snake_case_ )]]
return shuffled_kwargs
| 332 |
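To make the shard-splitting rule concrete: distributing 10 shards over 3 jobs leaves a remainder of 1, so the first group gets one extra shard. A standalone copy of the same arithmetic with readable names:

def distribute_shards(num_shards: int, max_num_jobs: int) -> list[range]:
    groups: list[range] = []
    for group_idx in range(max_num_jobs):
        num_to_add = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if num_to_add == 0:
            break
        start = groups[-1].stop if groups else 0
        groups.append(range(start, start + num_to_add))
    return groups

print(distribute_shards(10, 3))  # [range(0, 4), range(4, 7), range(7, 10)]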
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : Tuple = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_lowercase : List[str] = 25_00_04
_lowercase : int = 25_00_20
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
a__ : Union[str, Any] = MBartaaTokenizer
a__ : List[str] = MBartaaTokenizerFast
a__ : Any = True
a__ : List[str] = True
def a ( self : str ):
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase = MBartaaTokenizer(_lowercase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self : Dict ):
__UpperCAmelCase = '''<s>'''
__UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(_lowercase ) , 10_54 )
def a ( self : Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def a ( self : str ):
__UpperCAmelCase = MBartaaTokenizer(_lowercase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_lowercase )
__UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowercase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(
_lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def a ( self : str ):
# fmt: off
__UpperCAmelCase = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def a ( self : str ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__UpperCAmelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
__UpperCAmelCase = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase )
__UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
__UpperCAmelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(_lowercase , _lowercase )
# Checks everything loads correctly in the same way
__UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase )
__UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=True
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
__UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase )
# Checks it save with the same files
self.assertSequenceEqual(_lowercase , _lowercase )
# Checks everything loads correctly in the same way
__UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase )
__UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=False
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
__UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase )
__UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
a__ : str = "facebook/mbart-large-50-one-to-many-mmt"
a__ : Union[str, Any] = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
a__ : Any = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
a__ : Any = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]
@classmethod
def a ( cls : Tuple ):
__UpperCAmelCase = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
__UpperCAmelCase = 1
return cls
def a ( self : Union[str, Any] ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _lowercase )
def a ( self : Optional[Any] ):
self.assertIn(_lowercase , self.tokenizer.all_special_ids )
__UpperCAmelCase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
__UpperCAmelCase = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
__UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertNotIn(self.tokenizer.eos_token , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , _lowercase )
__UpperCAmelCase = 10
__UpperCAmelCase = self.tokenizer(_lowercase , max_length=_lowercase , truncation=_lowercase ).input_ids[0]
self.assertEqual(ids[0] , _lowercase )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(_lowercase ) , _lowercase )
def a ( self : Optional[int] ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_lowercase )
__UpperCAmelCase = MBartaaTokenizer.from_pretrained(_lowercase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowercase )
@require_torch
def a ( self : Dict ):
__UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowercase , return_tensors='''pt''' )
__UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def a ( self : Union[str, Any] ):
__UpperCAmelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
__UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _lowercase )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = self.tokenizer(self.src_text , padding=_lowercase , truncation=_lowercase , max_length=3 , return_tensors='''pt''' )
__UpperCAmelCase = self.tokenizer(
text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=10 , return_tensors='''pt''' )
__UpperCAmelCase = targets['''input_ids''']
__UpperCAmelCase = shift_tokens_right(_lowercase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a ( self : Dict ):
__UpperCAmelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(_lowercase ) , {
# en_XX, A, test, EOS
'''input_ids''': [[25_00_04, 62, 30_34, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
} , )
| 332 | 1 |
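The batch tests above lean on `shift_tokens_right` to turn labels into decoder inputs. For MBart-50 targets laid out as [lang_code, tokens..., eos], the shift rotates each row's last non-pad token to the front, which is why the tests expect decoder inputs to start with [2, RO_CODE]. A minimal sketch of that wrapping behavior (close to, but not guaranteed identical to, the library function):

import torch

def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    prev_output_tokens = input_ids.clone()
    # index of the last non-pad token in each row
    index_of_eos = (prev_output_tokens.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze(-1)
    prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone()
    prev_output_tokens[:, 0] = decoder_start_tokens
    return prev_output_tokens

labels = torch.tensor([[250020, 9, 8, 7, 2]])      # RO_CODE, tokens..., EOS
print(shift_tokens_right(labels, pad_token_id=1))  # tensor([[2, 250020, 9, 8, 7]])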
'''simple docstring'''
def __lowerCamelCase ( __snake_case : Tuple ):
"""simple docstring"""
if collection == []:
return []
# get some information about the collection
A__ : Any =len(__snake_case )
A__ : List[str] =max(__snake_case )
A__ : Optional[int] =min(__snake_case )
# create the counting array
A__ : Dict =coll_max + 1 - coll_min
A__ : Optional[int] =[0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
for i in range(1, __snake_case ):
A__ : Dict =counting_arr[i] + counting_arr[i - 1]
# create the output collection
A__ : List[str] =[0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0, __snake_case ) ):
A__ : Any =collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def __lowerCamelCase ( __snake_case : str ):
"""simple docstring"""
return "".join([chr(__snake_case ) for i in counting_sort([ord(__snake_case ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
__snake_case : Optional[Any] = input('Enter numbers separated by a comma:\n').strip()
__snake_case : Optional[int] = [int(item) for item in user_input.split(',')]
print(counting_sort(unsorted))
| 362 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : Optional[int] = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'gpt_bigcode'
__snake_case = ['past_key_values']
__snake_case = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Optional[Any] , lowerCAmelCase_ : List[str]=5_02_57 , lowerCAmelCase_ : str=10_24 , lowerCAmelCase_ : str=7_68 , lowerCAmelCase_ : str=12 , lowerCAmelCase_ : int=12 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : List[Any]="gelu_pytorch_tanh" , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Dict=1e-5 , lowerCAmelCase_ : str=0.02 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : str=5_02_56 , lowerCAmelCase_ : Dict=5_02_56 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[Any]=True , **lowerCAmelCase_ : Optional[Any] , ) -> Tuple:
'''simple docstring'''
A__ : Optional[Any] =vocab_size
A__ : Optional[Any] =n_positions
A__ : List[str] =n_embd
A__ : str =n_layer
A__ : Optional[int] =n_head
A__ : Optional[int] =n_inner
A__ : int =activation_function
A__ : int =resid_pdrop
A__ : int =embd_pdrop
A__ : Dict =attn_pdrop
A__ : Any =layer_norm_epsilon
A__ : List[Any] =initializer_range
A__ : Dict =scale_attn_weights
A__ : Any =use_cache
A__ : List[Any] =attention_softmax_in_fpaa
A__ : Optional[int] =scale_attention_softmax_in_fpaa
A__ : Dict =multi_query
A__ : List[str] =bos_token_id
A__ : Any =eos_token_id
super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
| 136 | 0 |
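The `attribute_map` above lets generic code read `hidden_size` or `num_hidden_layers` off this config even though it stores `n_embd` and `n_layer`. A usage sketch, assuming a transformers version that ships GPTBigCode and the defaults shown above:

from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig()
assert config.hidden_size == config.n_embd == 768
assert config.num_hidden_layers == config.n_layer == 12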
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a =16
a =32
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ = 1_6 ) -> Optional[int]:
__lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
__lowerCamelCase : int = load_dataset('glue' , 'mrpc' )
def tokenize_function(lowerCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase : List[Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__lowerCamelCase : int = datasets.map(
lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCamelCase : Dict = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowerCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__lowerCamelCase : List[str] = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__lowerCamelCase : Optional[int] = 1_6
elif accelerator.mixed_precision != "no":
__lowerCamelCase : List[Any] = 8
else:
__lowerCamelCase : Any = None
return tokenizer.pad(
lowerCamelCase__ , padding='longest' , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_tensors='pt' , )
# Instantiate dataloaders.
__lowerCamelCase : Any = DataLoader(
tokenized_datasets['train'] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
__lowerCamelCase : str = DataLoader(
tokenized_datasets['validation'] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a =mocked_dataloaders # noqa: F811
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Any:
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , lowerCamelCase__ ) == "1":
__lowerCamelCase : Union[str, Any] = 2
# Initialize accelerator
__lowerCamelCase : List[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase : Tuple = config['lr']
__lowerCamelCase : List[str] = int(config['num_epochs'] )
__lowerCamelCase : List[Any] = int(config['seed'] )
__lowerCamelCase : int = int(config['batch_size'] )
__lowerCamelCase : List[str] = evaluate.load('glue' , 'mrpc' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=lowerCamelCase__ )
def inner_training_loop(lowerCamelCase__ ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowerCamelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowerCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCamelCase : Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
__lowerCamelCase : Any = AdamW(params=model.parameters() , lr=lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase : str = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ )
# Instantiate scheduler
__lowerCamelCase : int = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCamelCase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Now we train the model
for epoch in range(lowerCamelCase__ ):
model.train()
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__lowerCamelCase : Optional[Any] = model(**lowerCamelCase__ )
__lowerCamelCase : Tuple = outputs.loss
accelerator.backward(lowerCamelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCamelCase : List[str] = model(**lowerCamelCase__ )
__lowerCamelCase : str = outputs.logits.argmax(dim=-1 )
__lowerCamelCase , __lowerCamelCase : Optional[int] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=lowerCamelCase__ , references=lowerCamelCase__ , )
__lowerCamelCase : Optional[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , lowerCamelCase__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
__lowerCamelCase : Union[str, Any] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        ' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'
        ' and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
__lowerCamelCase : Optional[int] = parser.parse_args()
__lowerCamelCase : Optional[int] = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
training_function(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
main()
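The decorator is the heart of this example: on a CUDA out-of-memory error it halves the batch size and re-runs the wrapped function. A dependency-free sketch of that retry pattern (simplified and renamed; accelerate's real implementation inspects the exception more carefully and frees cached memory between attempts):

import functools

def find_executable_batch_size_sketch(function=None, starting_batch_size=128):
    if function is None:
        return functools.partial(find_executable_batch_size_sketch, starting_batch_size=starting_batch_size)

    @functools.wraps(function)
    def wrapper():
        batch_size = starting_batch_size
        while batch_size > 0:
            try:
                return function(batch_size)
            except RuntimeError as e:
                if "out of memory" in str(e).lower():
                    batch_size //= 2  # halve and retry
                else:
                    raise
        raise RuntimeError("No executable batch size found, reached zero.")

    return wrapper

@find_executable_batch_size_sketch(starting_batch_size=64)
def _toy_train(batch_size):
    if batch_size > 16:
        raise RuntimeError("CUDA out of memory")  # simulated OOM
    return batch_size

print(_toy_train())  # 16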
| 73 |
"""simple docstring"""
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of ``numbers``."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError('numbers must be an iterable of integers')
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
| 150 | 0 |
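Quick hand-checked sanity tests for the repaired function above:

assert max_product_subarray([2, 3, -2, 4]) == 6    # best subarray: [2, 3]
assert max_product_subarray([-2, 0, -1]) == 0
assert max_product_subarray([-2, 3, -4]) == 24     # best subarray: [-2, 3, -4]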
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCAmelCase_ ( )-> Dict:
'''simple docstring'''
UpperCAmelCase : Tuple ='''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
UpperCAmelCase : Optional[int] =Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ).convert('''RGB''' )
return image
def lowerCAmelCase_ ( __lowerCAmelCase )-> Dict:
'''simple docstring'''
UpperCAmelCase : Dict =[]
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> Tuple:
'''simple docstring'''
UpperCAmelCase : Dict =dct.pop(__lowerCAmelCase )
UpperCAmelCase : Optional[int] =val
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> int:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase : List[Any] =state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCAmelCase : str =state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
UpperCAmelCase : Union[str, Any] =torch.cat((q_bias, torch.zeros_like(__lowerCAmelCase , requires_grad=__lowerCAmelCase ), v_bias) )
UpperCAmelCase : Optional[int] =qkv_bias
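# The zero block in the concatenation above exists because the original
# vision encoder's attention carries biases only for q and v; a zero k-bias
# is spliced in so the fused qkv bias has the expected (3 * hidden,) shape.
# Shape-only illustration (hidden size 4 is an arbitrary stand-in):
#   q = torch.ones(4); v = torch.full((4,), 2.0)
#   qkv = torch.cat((q, torch.zeros_like(q), v))  # shape (12,)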
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase )-> List[Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] =3_64 if '''coco''' in model_name else 2_24
UpperCAmelCase : Optional[Any] =BlipaVisionConfig(image_size=__lowerCAmelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCAmelCase : int =OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=__lowerCAmelCase ).to_dict()
elif "opt-6.7b" in model_name:
UpperCAmelCase : Optional[Any] =OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=__lowerCAmelCase ).to_dict()
elif "t5-xl" in model_name:
UpperCAmelCase : List[str] =TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase : Tuple =TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
UpperCAmelCase : Dict =BlipaConfig(vision_config=__lowerCAmelCase , text_config=__lowerCAmelCase )
return config, image_size
@torch.no_grad()
def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=False )-> int:
'''simple docstring'''
UpperCAmelCase : str =(
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
UpperCAmelCase : Tuple =tokenizer('''\n''' , add_special_tokens=__lowerCAmelCase ).input_ids[0]
UpperCAmelCase , UpperCAmelCase : int =get_blipa_config(__lowerCAmelCase , eos_token_id=__lowerCAmelCase )
UpperCAmelCase : Dict =BlipaForConditionalGeneration(__lowerCAmelCase ).eval()
UpperCAmelCase : Optional[Any] ={
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
UpperCAmelCase , UpperCAmelCase : Tuple =model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCAmelCase : Dict ='''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] =load_model_and_preprocess(
name=__lowerCAmelCase , model_type=__lowerCAmelCase , is_eval=__lowerCAmelCase , device=__lowerCAmelCase )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCAmelCase : Dict =original_model.state_dict()
UpperCAmelCase : str =create_rename_keys(__lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase : List[Any] =state_dict.pop(__lowerCAmelCase )
if key.startswith('''Qformer.bert''' ):
UpperCAmelCase : Optional[int] =key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCAmelCase : Optional[int] =key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
UpperCAmelCase : List[str] =key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCAmelCase : Optional[int] =key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
UpperCAmelCase : List[str] =key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
UpperCAmelCase : List[str] =key.replace('''t5''' , '''language''' )
UpperCAmelCase : int =val
# read in qv biases
read_in_q_v_bias(__lowerCAmelCase , __lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase : List[str] =hf_model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
assert len(__lowerCAmelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase : Tuple =load_demo_image()
UpperCAmelCase : Tuple =vis_processors['''eval'''](__lowerCAmelCase ).unsqueeze(0 ).to(__lowerCAmelCase )
UpperCAmelCase : int =tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(__lowerCAmelCase )
# create processor
UpperCAmelCase : Any =BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=__lowerCAmelCase , image_std=__lowerCAmelCase )
UpperCAmelCase : Any =BlipaProcessor(image_processor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
UpperCAmelCase : int =processor(images=__lowerCAmelCase , return_tensors='''pt''' ).pixel_values.to(__lowerCAmelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase )
original_model.to(__lowerCAmelCase )
hf_model.to(__lowerCAmelCase )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase : Optional[int] =original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCAmelCase : str =hf_model(__lowerCAmelCase , __lowerCAmelCase ).logits
else:
UpperCAmelCase : Optional[int] =original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
UpperCAmelCase : Dict =input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
UpperCAmelCase : Optional[Any] =hf_model(__lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase : Any =torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=__lowerCAmelCase )
assert torch.allclose(logits[0, :3, :3] , __lowerCAmelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase : List[str] =torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=__lowerCAmelCase )
else:
# cast to same type
UpperCAmelCase : str =logits.dtype
assert torch.allclose(original_logits.to(__lowerCAmelCase ) , __lowerCAmelCase , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCAmelCase : Any =''''''
UpperCAmelCase : Dict =tokenizer(__lowerCAmelCase , return_tensors='''pt''' ).input_ids.to(__lowerCAmelCase )
UpperCAmelCase : int =original_model.generate({'''image''': original_pixel_values} )
UpperCAmelCase : Optional[Any] =hf_model.generate(
__lowerCAmelCase , __lowerCAmelCase , do_sample=__lowerCAmelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , __lowerCAmelCase )
UpperCAmelCase : int =input_ids.shape[1]
UpperCAmelCase : List[Any] =processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__lowerCAmelCase )
UpperCAmelCase : Optional[int] =[text.strip() for text in output_text]
print('''HF generation:''' , __lowerCAmelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowerCAmelCase )
hf_model.save_pretrained(__lowerCAmelCase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
__snake_case = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
        help='''Name of the model to convert (must be one of the listed choices)''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__snake_case = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
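    # Minimal sketch of the key-renaming pattern used by the conversion above,
    # applied to a plain dict; the key names here are illustrative toy values.
    toy_state = {"visual_encoder.blocks.0.norm1.weight": 1.0}
    toy_renames = [
        ("visual_encoder.blocks.0.norm1.weight", "vision_model.encoder.layers.0.layer_norm1.weight")
    ]
    for toy_src, toy_dest in toy_renames:
        toy_state[toy_dest] = toy_state.pop(toy_src)  # pop the old key, re-insert under the new name
    assert "vision_model.encoder.layers.0.layer_norm1.weight" in toy_state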
| 78 | import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __snake_case ( lowerCamelCase__ ):
__lowerCamelCase : Union[str, Any] = ["""image_processor""", """tokenizer"""]
__lowerCamelCase : Union[str, Any] = """CLIPImageProcessor"""
__lowerCamelCase : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , snake_case__=None , snake_case__=None , **snake_case__ ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Optional[int] =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , snake_case__ , )
UpperCAmelCase : int =kwargs.pop('''feature_extractor''' )
UpperCAmelCase : Tuple =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(snake_case__ , snake_case__ )
def __call__( self , snake_case__=None , snake_case__=None , snake_case__=None , **snake_case__ ) -> Optional[Any]:
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
UpperCAmelCase : List[Any] =self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if images is not None:
UpperCAmelCase : Tuple =self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
UpperCAmelCase : List[Any] =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def UpperCAmelCase__ ( self , *snake_case__ , **snake_case__ ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase__ ( self , *snake_case__ , **snake_case__ ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] =self.tokenizer.model_input_names
UpperCAmelCase : Union[str, Any] =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , snake_case__ , )
return self.image_processor_class
@property
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , snake_case__ , )
return self.image_processor
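# Hedged usage sketch for a CLIP-style processor such as the class above, via
# the public transformers API; the checkpoint name below is an assumption.
from PIL import Image
from transformers import CLIPProcessor
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
demo_image = Image.new("RGB", (224, 224))
demo_inputs = clip_processor(text=["a photo"], images=demo_image, return_tensors="pt")
print(demo_inputs.keys())  # input_ids, attention_mask, pixel_values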
| 78 | 1 |
'''simple docstring'''
import os
import time
import numpy as np
import onnxruntime as ort
_A : Union[str, Any] ='''1'''
_A : str ='''0'''
_A : Dict ='''1'''
_A : int =ort.SessionOptions()
_A : str =ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
_A : Union[str, Any] =['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
_A : Any =ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
_A : int =ort.RunOptions()
_A : int =128
_A : List[str] =1
_A : Optional[Any] =np.ones((batch, sequence), dtype=np.intaa)
_A : List[str] =np.ones((batch, sequence), dtype=np.intaa)
_A : str =np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
_A : Optional[Any] =time.time()
_A : List[str] =2_000
_A : str ={}
for iter in range(max_iters):
_A : Optional[Any] =sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1_000 / max_iters))
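# Small follow-on sketch: deriving latency and throughput figures from a
# measured wall clock; the elapsed time below is an assumed placeholder value.
elapsed_s = 12.0  # assumed total loop time in seconds
iters, batch_size = 2_000, 1
print("latency: {:.3f} ms".format(elapsed_s * 1_000 / iters))
print("throughput: {:.1f} seq/s".format(iters * batch_size / elapsed_s))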
| 41 | import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_UpperCAmelCase = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="""relu""")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation="""relu"""))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
_UpperCAmelCase = train_datagen.flow_from_directory(
"""dataset/training_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
)
_UpperCAmelCase = test_datagen.flow_from_directory(
"""dataset/test_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
_UpperCAmelCase = tf.keras.preprocessing.image.load_img(
"""dataset/single_prediction/image.png""", target_size=(64, 64)
)
_UpperCAmelCase = tf.keras.preprocessing.image.img_to_array(test_image)
_UpperCAmelCase = np.expand_dims(test_image, axis=0)
    # rescale to [0, 1] to match the 1/255 normalization used by the data generators
    _UpperCAmelCase = test_image / 255.0
    _UpperCAmelCase = classifier.predict(test_image)
# training_set.class_indices
    if result[0][0] <= 0.5:  # the sigmoid output is a probability, so threshold at 0.5
        _UpperCAmelCase = """Normal"""
    else:
        _UpperCAmelCase = """Abnormality detected"""
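    # Hedged follow-up: report batch accuracy on the held-out generator defined
    # above, assuming the standard Keras evaluate() return of [loss, accuracy].
    loss, acc = classifier.evaluate(test_set)
    print("test accuracy: {:.3f}".format(acc))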
| 140 | 0 |
from math import ceil
def A ( _lowerCamelCase = 1_001 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
_lowerCAmelCase : Any = 2 * i + 1
_lowerCAmelCase : List[str] = 2 * i
_lowerCAmelCase : Union[str, Any] = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
_snake_case = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
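    # Hedged sanity checks for the spiral-diagonal solver A() above, against
    # small cases that can be verified by hand.
    assert A(3) == 25  # 1 + 3 + 5 + 7 + 9
    assert A(5) == 101  # value quoted in the Project Euler 28 statement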
| 300 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_snake_case = True
from torch.cuda.amp import autocast
_snake_case = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to log verbose messages or not.'} , )
lowerCamelCase__ = field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'})
lowerCamelCase__ = field(
default=0.9_9_9_9_9_5 , metadata={'help': 'Decay of gumbel temperature during training.'})
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
_lowerCAmelCase : Optional[Any] = logging.WARNING
if model_args.verbose_logging:
_lowerCAmelCase : Dict = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
_lowerCAmelCase : str = logging.INFO
logger.setLevel(_lowerCamelCase )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
default=a , metadata={'help': 'The name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowerCamelCase__ = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
lowerCamelCase__ = field(
default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
lowerCamelCase__ = field(
default=1 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowerCamelCase__ = field(
default=2_0.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'})
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = "longest"
lowerCamelCase__ = None
lowerCamelCase__ = None
def __call__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.feature_extractor.pad(
__a, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
_lowerCAmelCase : Tuple = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
_lowerCAmelCase : Optional[Any] = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
_lowerCAmelCase : List[str] = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
torch.long)
_lowerCAmelCase : Dict = torch.zeros(
(batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations make sure that all values
            # before the output length indices are attended to
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : Union[str, Any] = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
# sample randomly masked indices
_lowerCAmelCase : Optional[Any] = _compute_mask_indices(
(batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=__a, min_masks=2, )
return batch
class UpperCAmelCase_ ( a):
def __init__( self, *__a, __a=1, __a=0, __a=1.0, **__a):
'''simple docstring'''
super().__init__(*__a, **__a)
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : List[str] = max_gumbel_temp
_lowerCAmelCase : List[Any] = min_gumbel_temp
_lowerCAmelCase : int = gumbel_temp_decay
def snake_case__ ( self, __a, __a):
'''simple docstring'''
model.train()
_lowerCAmelCase : str = self._prepare_inputs(__a)
if self.use_amp:
with autocast():
_lowerCAmelCase : Any = self.compute_loss(__a, __a)
else:
_lowerCAmelCase : Dict = self.compute_loss(__a, __a)
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
_lowerCAmelCase : List[str] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_lowerCAmelCase : Union[str, Any] = loss.sum() / (inputs["mask_time_indices"]).sum()
else:
raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
if self.args.gradient_accumulation_steps > 1:
_lowerCAmelCase : List[str] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__a).backward()
elif self.use_apex:
with amp.scale_loss(__a, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__a)
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
return loss.detach()
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = parser.parse_args_into_dataclasses()
configure_logger(_lowerCamelCase , _lowerCamelCase )
# Downloading and loading a dataset from the hub.
_lowerCAmelCase : List[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
_lowerCAmelCase : int = DatasetDict()
_lowerCAmelCase : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , )
else:
        # make sure only "validation" and "train" keys remain
_lowerCAmelCase : List[str] = DatasetDict()
_lowerCAmelCase : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
_lowerCAmelCase : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
_lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_lowerCamelCase )
def prepare_dataset(_lowerCamelCase ):
# check that all files have the correct sampling rate
_lowerCAmelCase , _lowerCAmelCase : Any = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
_lowerCAmelCase : Dict = datasets.map(
_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names )
# filter audio files that are too long
_lowerCAmelCase : Tuple = vectorized_datasets.filter(
lambda _lowerCamelCase : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(_lowerCamelCase ):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
_lowerCAmelCase : Dict = vectorized_datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
_lowerCAmelCase : Tuple = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm='layer'" )
_lowerCAmelCase : Union[str, Any] = WavaVecaForPreTraining(_lowerCamelCase )
_lowerCAmelCase : int = DataCollatorForWavaVecaPretraining(model=_lowerCamelCase , feature_extractor=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = WavaVecaPreTrainer(
model=_lowerCamelCase , data_collator=_lowerCamelCase , args=_lowerCamelCase , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=_lowerCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
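    # Minimal hedged sketch of the SpecAugment-style masking used by the data
    # collator above; assumes the private transformers helper keeps this import
    # path and its (shape, mask_prob, mask_length, ..., min_masks) signature.
    from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices

    demo_mask = _compute_mask_indices((2, 50), mask_prob=0.2, mask_length=4, min_masks=2)
    print(demo_mask.shape, demo_mask.sum())  # boolean (2, 50) array, roughly 20% of frames masked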
| 300 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : List[str] = 'swin2sr'
lowerCAmelCase : str = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : Union[str, Any] ,_UpperCAmelCase : Tuple=64 ,_UpperCAmelCase : str=1 ,_UpperCAmelCase : List[Any]=3 ,_UpperCAmelCase : Any=180 ,_UpperCAmelCase : Optional[Any]=[6, 6, 6, 6, 6, 6] ,_UpperCAmelCase : Any=[6, 6, 6, 6, 6, 6] ,_UpperCAmelCase : int=8 ,_UpperCAmelCase : Any=2.0 ,_UpperCAmelCase : Optional[Any]=True ,_UpperCAmelCase : Tuple=0.0 ,_UpperCAmelCase : Optional[int]=0.0 ,_UpperCAmelCase : Tuple=0.1 ,_UpperCAmelCase : List[str]="gelu" ,_UpperCAmelCase : Tuple=False ,_UpperCAmelCase : Optional[int]=0.02 ,_UpperCAmelCase : Union[str, Any]=1E-5 ,_UpperCAmelCase : int=2 ,_UpperCAmelCase : Tuple=1.0 ,_UpperCAmelCase : Union[str, Any]="1conv" ,_UpperCAmelCase : Tuple="pixelshuffle" ,**_UpperCAmelCase : List[str] ,):
super().__init__(**_UpperCAmelCase )
_a : List[str] = image_size
_a : Dict = patch_size
_a : Optional[int] = num_channels
_a : Optional[int] = embed_dim
_a : Union[str, Any] = depths
_a : Optional[int] = len(_UpperCAmelCase )
_a : Optional[Any] = num_heads
_a : str = window_size
_a : Optional[Any] = mlp_ratio
_a : Optional[int] = qkv_bias
_a : Tuple = hidden_dropout_prob
_a : List[Any] = attention_probs_dropout_prob
_a : Any = drop_path_rate
_a : str = hidden_act
_a : Tuple = use_absolute_embeddings
_a : Dict = layer_norm_eps
_a : Any = initializer_range
_a : Optional[Any] = upscale
_a : int = img_range
_a : Union[str, Any] = resi_connection
_a : int = upsampler
| 89 |
'''simple docstring'''
from itertools import count
def a__ ( a__ = 50 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [1] * min_block_length
for n in count(a__ ):
fill_count_functions.append(1 )
for block_length in range(a__ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_00_00_00:
break
return n
if __name__ == "__main__":
print(f"""{solution() = }""")
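    # Hedged sanity check from the Project Euler 115 statement: with a minimum
    # block length of 3, the fill count first exceeds one million at n = 30.
    assert a__(3) == 30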
| 267 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = 42
class a ( nn.Module ):
def __init__( self , __magic_name__=3 , __magic_name__=3 , __magic_name__=("DownEncoderBlock2D",) , __magic_name__=(64,) , __magic_name__=2 , __magic_name__=32 , __magic_name__="silu" , __magic_name__=True , ) -> str:
super().__init__()
_a = layers_per_block
_a = torch.nn.Convad(
__magic_name__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_a = None
_a = nn.ModuleList([] )
# down
_a = block_out_channels[0]
for i, down_block_type in enumerate(__magic_name__ ):
_a = output_channel
_a = block_out_channels[i]
_a = i == len(__magic_name__ ) - 1
_a = get_down_block(
__magic_name__ , num_layers=self.layers_per_block , in_channels=__magic_name__ , out_channels=__magic_name__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__magic_name__ , resnet_groups=__magic_name__ , attention_head_dim=__magic_name__ , temb_channels=__magic_name__ , )
self.down_blocks.append(__magic_name__ )
# mid
_a = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__magic_name__ , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=__magic_name__ , temb_channels=__magic_name__ , )
# out
_a = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__magic_name__ , eps=1e-6 )
_a = nn.SiLU()
_a = 2 * out_channels if double_z else out_channels
_a = nn.Convad(block_out_channels[-1] , __magic_name__ , 3 , padding=1 )
_a = False
def __UpperCAmelCase ( self , __magic_name__ ) -> Optional[Any]:
_a = x
_a = self.conv_in(__magic_name__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__magic_name__ ):
def custom_forward(*__magic_name__ ):
return module(*__magic_name__ )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
_a = torch.utils.checkpoint.checkpoint(
create_custom_forward(__magic_name__ ) , __magic_name__ , use_reentrant=__magic_name__ )
# middle
_a = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __magic_name__ , use_reentrant=__magic_name__ )
else:
for down_block in self.down_blocks:
_a = torch.utils.checkpoint.checkpoint(create_custom_forward(__magic_name__ ) , __magic_name__ )
# middle
_a = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __magic_name__ )
else:
# down
for down_block in self.down_blocks:
_a = down_block(__magic_name__ )
# middle
_a = self.mid_block(__magic_name__ )
# post-process
_a = self.conv_norm_out(__magic_name__ )
_a = self.conv_act(__magic_name__ )
_a = self.conv_out(__magic_name__ )
return sample
class a ( nn.Module ):
def __init__( self , __magic_name__=3 , __magic_name__=3 , __magic_name__=("UpDecoderBlock2D",) , __magic_name__=(64,) , __magic_name__=2 , __magic_name__=32 , __magic_name__="silu" , __magic_name__="group" , ) -> List[Any]:
super().__init__()
_a = layers_per_block
_a = nn.Convad(
__magic_name__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_a = None
_a = nn.ModuleList([] )
_a = in_channels if norm_type == 'spatial' else None
# mid
_a = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__magic_name__ , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__magic_name__ , temb_channels=__magic_name__ , )
# up
_a = list(reversed(__magic_name__ ) )
_a = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__magic_name__ ):
_a = output_channel
_a = reversed_block_out_channels[i]
_a = i == len(__magic_name__ ) - 1
_a = get_up_block(
__magic_name__ , num_layers=self.layers_per_block + 1 , in_channels=__magic_name__ , out_channels=__magic_name__ , prev_output_channel=__magic_name__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__magic_name__ , resnet_groups=__magic_name__ , attention_head_dim=__magic_name__ , temb_channels=__magic_name__ , resnet_time_scale_shift=__magic_name__ , )
self.up_blocks.append(__magic_name__ )
_a = output_channel
# out
if norm_type == "spatial":
_a = SpatialNorm(block_out_channels[0] , __magic_name__ )
else:
_a = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__magic_name__ , eps=1e-6 )
_a = nn.SiLU()
_a = nn.Convad(block_out_channels[0] , __magic_name__ , 3 , padding=1 )
_a = False
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__=None ) -> str:
_a = z
_a = self.conv_in(__magic_name__ )
_a = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__magic_name__ ):
def custom_forward(*__magic_name__ ):
return module(*__magic_name__ )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
_a = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __magic_name__ , __magic_name__ , use_reentrant=__magic_name__ )
_a = sample.to(__magic_name__ )
# up
for up_block in self.up_blocks:
_a = torch.utils.checkpoint.checkpoint(
create_custom_forward(__magic_name__ ) , __magic_name__ , __magic_name__ , use_reentrant=__magic_name__ )
else:
# middle
_a = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , __magic_name__ , __magic_name__ )
_a = sample.to(__magic_name__ )
# up
for up_block in self.up_blocks:
_a = torch.utils.checkpoint.checkpoint(create_custom_forward(__magic_name__ ) , __magic_name__ , __magic_name__ )
else:
# middle
_a = self.mid_block(__magic_name__ , __magic_name__ )
_a = sample.to(__magic_name__ )
# up
for up_block in self.up_blocks:
_a = up_block(__magic_name__ , __magic_name__ )
# post-process
if latent_embeds is None:
_a = self.conv_norm_out(__magic_name__ )
else:
_a = self.conv_norm_out(__magic_name__ , __magic_name__ )
_a = self.conv_act(__magic_name__ )
_a = self.conv_out(__magic_name__ )
return sample
class a ( nn.Module ):
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__="random" , __magic_name__=False , __magic_name__=True ) -> Union[str, Any]:
super().__init__()
_a = n_e
_a = vq_embed_dim
_a = beta
_a = legacy
_a = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_a = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
_a = self.used.shape[0]
_a = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_a = self.re_embed
_a = self.re_embed + 1
print(
f'Remapping {self.n_e} indices to {self.re_embed} indices. '
f'Using {self.unknown_index} for unknown indices.' )
else:
_a = n_e
_a = sane_index_shape
def __UpperCAmelCase ( self , __magic_name__ ) -> Union[str, Any]:
_a = inds.shape
assert len(__magic_name__ ) > 1
_a = inds.reshape(ishape[0] , -1 )
_a = self.used.to(__magic_name__ )
_a = (inds[:, :, None] == used[None, None, ...]).long()
_a = match.argmax(-1 )
_a = match.sum(2 ) < 1
if self.unknown_index == "random":
_a = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_a = self.unknown_index
return new.reshape(__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ ) -> Dict:
_a = inds.shape
assert len(__magic_name__ ) > 1
_a = inds.reshape(ishape[0] , -1 )
_a = self.used.to(__magic_name__ )
if self.re_embed > self.used.shape[0]: # extra token
_a = 0 # simply set to zero
_a = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __magic_name__ )
return back.reshape(__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ ) -> Optional[Any]:
# reshape z -> (batch, height, width, channel) and flatten
_a = z.permute(0 , 2 , 3 , 1 ).contiguous()
_a = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_a = torch.argmin(torch.cdist(__magic_name__ , self.embedding.weight ) , dim=1 )
_a = self.embedding(__magic_name__ ).view(z.shape )
_a = None
_a = None
# compute loss for embedding
if not self.legacy:
_a = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_a = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_a = z + (z_q - z).detach()
# reshape back to match original input shape
_a = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_a = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_a = self.remap_to_used(__magic_name__ )
_a = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_a = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> List[str]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
_a = indices.reshape(shape[0] , -1 ) # add batch axis
_a = self.unmap_to_all(__magic_name__ )
_a = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_a = self.embedding(__magic_name__ )
if shape is not None:
_a = z_q.view(__magic_name__ )
# reshape back to match original input shape
_a = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class a ( _SCREAMING_SNAKE_CASE ):
def __init__( self , __magic_name__ , __magic_name__=False ) -> Dict:
_a = parameters
_a , _a = torch.chunk(__magic_name__ , 2 , dim=1 )
_a = torch.clamp(self.logvar , -3_0.0 , 2_0.0 )
_a = deterministic
_a = torch.exp(0.5 * self.logvar )
_a = torch.exp(self.logvar )
if self.deterministic:
_a = _a = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __UpperCAmelCase ( self , __magic_name__ = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
_a = randn_tensor(
self.mean.shape , generator=__magic_name__ , device=self.parameters.device , dtype=self.parameters.dtype )
_a = self.mean + self.std * sample
return x
def __UpperCAmelCase ( self , __magic_name__=None ) -> Dict:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__=[1, 2, 3] ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
_a = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__magic_name__ )
def __UpperCAmelCase ( self ) -> Optional[int]:
return self.mean
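# The posterior class above reduces to the reparameterisation trick plus a
# closed-form KL against a standard normal; a self-contained numeric sketch
# (tensor sizes are illustrative).
import torch
demo_params = torch.randn(1, 8, 4, 4)  # 2*C channels packing mean and logvar
demo_mean, demo_logvar = torch.chunk(demo_params, 2, dim=1)
demo_logvar = torch.clamp(demo_logvar, -30.0, 20.0)
demo_std = torch.exp(0.5 * demo_logvar)
demo_sample = demo_mean + demo_std * torch.randn_like(demo_mean)  # reparameterised draw
demo_kl = 0.5 * torch.sum(demo_mean.pow(2) + demo_logvar.exp() - 1.0 - demo_logvar, dim=[1, 2, 3])
print(demo_sample.shape, demo_kl.shape)  # torch.Size([1, 4, 4, 4]) torch.Size([1])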
| 104 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a_ : Tuple = logging.get_logger(__name__)
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = ["""input_features"""]
def __init__( self , __magic_name__=80 , __magic_name__=1_60_00 , __magic_name__=1_60 , __magic_name__=30 , __magic_name__=4_00 , __magic_name__=0.0 , __magic_name__=False , **__magic_name__ , ) -> Optional[int]:
super().__init__(
feature_size=__magic_name__ , sampling_rate=__magic_name__ , padding_value=__magic_name__ , return_attention_mask=__magic_name__ , **__magic_name__ , )
_a = n_fft
_a = hop_length
_a = chunk_length
_a = chunk_length * sampling_rate
_a = self.n_samples // hop_length
_a = sampling_rate
_a = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__magic_name__ , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=__magic_name__ , norm='slaney' , mel_scale='slaney' , )
def __UpperCAmelCase ( self , __magic_name__ ) -> np.ndarray:
_a = spectrogram(
__magic_name__ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
_a = log_spec[:, :-1]
_a = np.maximum(__magic_name__ , log_spec.max() - 8.0 )
_a = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __UpperCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
_a = np.array(__magic_name__ , np.intaa )
_a = []
for vector, length in zip(__magic_name__ , attention_mask.sum(-1 ) ):
_a = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
_a = padding_value
normed_input_values.append(__magic_name__ )
else:
_a = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self , __magic_name__ , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "max_length" , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_a = isinstance(__magic_name__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_a = is_batched_numpy or (
isinstance(__magic_name__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__magic_name__ , np.ndarray ):
_a = np.asarray(__magic_name__ , dtype=np.floataa )
elif isinstance(__magic_name__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_a = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_a = [np.asarray([raw_speech] ).T]
_a = BatchFeature({'input_features': raw_speech} )
# convert into correct format for padding
_a = self.pad(
__magic_name__ , padding=__magic_name__ , max_length=max_length if max_length else self.n_samples , truncation=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_a = self.zero_mean_unit_var_norm(
padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
_a = np.stack(padded_inputs['input_features'] , axis=0 )
# make sure list is in array format
_a = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 )
_a = [self._np_extract_fbank_features(__magic_name__ ) for waveform in input_features[0]]
if isinstance(input_features[0] , __magic_name__ ):
_a = [np.asarray(__magic_name__ , dtype=np.floataa ) for feature in input_features]
else:
_a = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
_a = padded_inputs['attention_mask'][:, :: self.hop_length]
if return_tensors is not None:
_a = padded_inputs.convert_to_tensors(__magic_name__ )
return padded_inputs
def __UpperCAmelCase ( self ) -> Dict[str, Any]:
_a = copy.deepcopy(self.__dict__ )
_a = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
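# Hedged usage sketch via the released WhisperFeatureExtractor this class
# mirrors; the defaults used below match the constructor arguments above.
import numpy as np
from transformers import WhisperFeatureExtractor
demo_fe = WhisperFeatureExtractor()
demo_audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
demo_feats = demo_fe(demo_audio, sampling_rate=16_000, return_tensors="np")
print(demo_feats["input_features"].shape)  # (1, 80, 3000): 80 mel bins over 30 s of frames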
| 104 | 1 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowerCamelCase_ ( snake_case__ ):
lowerCAmelCase__ = 'char'
lowerCAmelCase__ = 'bpe'
lowerCAmelCase__ = 'wp'
UpperCamelCase__ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowerCamelCase_ ( snake_case__ ):
lowerCAmelCase__ = ['image_processor', 'char_tokenizer']
lowerCAmelCase__ = 'ViTImageProcessor'
lowerCAmelCase__ = 'MgpstrTokenizer'
def __init__( self : Optional[int] , _A : List[str]=None , _A : Any=None , **_A : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __snake_case , )
UpperCAmelCase__ : str = kwargs.pop('''feature_extractor''' )
UpperCAmelCase__ : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
UpperCAmelCase__ : Any = tokenizer
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained('''gpt2''' )
UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('''bert-base-uncased''' )
super().__init__(__snake_case , __snake_case )
def __call__( self : str , _A : Any=None , _A : Union[str, Any]=None , _A : Union[str, Any]=None , **_A : List[str] ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
UpperCAmelCase__ : Optional[int] = self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case )
if text is not None:
UpperCAmelCase__ : Optional[int] = self.char_tokenizer(__snake_case , return_tensors=__snake_case , **__snake_case )
if text is None:
return inputs
elif images is None:
return encodings
else:
UpperCAmelCase__ : Any = encodings['''input_ids''']
return inputs
def lowercase_ ( self : Optional[int] , _A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = sequences
UpperCAmelCase__ : Any = char_preds.size(0 )
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self._decode_helper(__snake_case , '''char''' )
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self._decode_helper(__snake_case , '''bpe''' )
UpperCAmelCase__ , UpperCAmelCase__ : Any = self._decode_helper(__snake_case , '''wp''' )
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : Optional[int] = []
for i in range(__snake_case ):
UpperCAmelCase__ : Dict = [char_scores[i], bpe_scores[i], wp_scores[i]]
UpperCAmelCase__ : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]]
UpperCAmelCase__ : List[str] = scores.index(max(__snake_case ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
UpperCAmelCase__ : int = {}
UpperCAmelCase__ : List[str] = final_strs
UpperCAmelCase__ : Union[str, Any] = final_scores
UpperCAmelCase__ : Tuple = char_strs
UpperCAmelCase__ : int = bpe_strs
UpperCAmelCase__ : Union[str, Any] = wp_strs
return out
def lowercase_ ( self : int , _A : List[Any] , _A : Dict ):
'''simple docstring'''
if format == DecodeType.CHARACTER:
UpperCAmelCase__ : Optional[int] = self.char_decode
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : Optional[Any] = '''[s]'''
elif format == DecodeType.BPE:
UpperCAmelCase__ : str = self.bpe_decode
UpperCAmelCase__ : Any = 2
UpperCAmelCase__ : Dict = '''#'''
elif format == DecodeType.WORDPIECE:
UpperCAmelCase__ : Dict = self.wp_decode
UpperCAmelCase__ : Dict = 102
UpperCAmelCase__ : Tuple = '''[SEP]'''
else:
raise ValueError(f"""Format {format} is not supported.""" )
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = [], []
UpperCAmelCase__ : List[Any] = pred_logits.size(0 )
UpperCAmelCase__ : str = pred_logits.size(1 )
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = pred_logits.topk(1 , dim=-1 , largest=__snake_case , sorted=__snake_case )
UpperCAmelCase__ : Union[str, Any] = preds_index.view(-1 , __snake_case )[:, 1:]
UpperCAmelCase__ : int = decoder(__snake_case )
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = torch.nn.functional.softmax(__snake_case , dim=2 ).max(dim=2 )
UpperCAmelCase__ : Tuple = preds_max_prob[:, 1:]
for index in range(__snake_case ):
UpperCAmelCase__ : Optional[Any] = preds_str[index].find(__snake_case )
UpperCAmelCase__ : str = preds_str[index][:pred_eos]
UpperCAmelCase__ : Tuple = preds_index[index].cpu().tolist()
UpperCAmelCase__ : List[Any] = pred_index.index(__snake_case ) if eos_token in pred_index else -1
UpperCAmelCase__ : Optional[Any] = preds_max_prob[index][: pred_eos_index + 1]
UpperCAmelCase__ : List[Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__snake_case )
conf_scores.append(__snake_case )
return dec_strs, conf_scores
def lowercase_ ( self : Union[str, Any] , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(__snake_case )]
return decode_strs
def lowercase_ ( self : List[str] , _A : Tuple ):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__snake_case )
def lowercase_ ( self : Union[str, Any] , _A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(__snake_case )]
return decode_strs
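# The batch_decode above keeps, per sample, whichever of the three heads
# (char / BPE / WordPiece) scores the highest cumulative token probability;
# a toy illustration of that selection with assumed confidence values.
demo_scores = {"char": 0.91, "bpe": 0.74, "wp": 0.88}
demo_strs = {"char": "ticket", "bpe": "ticker", "wp": "ticket"}
demo_best = max(demo_scores, key=demo_scores.get)
print(demo_strs[demo_best], demo_scores[demo_best])  # ticket 0.91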
| 181 |
_SCREAMING_SNAKE_CASE : Optional[Any] = tuple[float, float, float]
_SCREAMING_SNAKE_CASE : Optional[Any] = tuple[float, float, float]
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
snake_case = end_pointa[0] - end_pointa[0]
snake_case = end_pointa[1] - end_pointa[1]
snake_case = end_pointa[2] - end_pointa[2]
return (x, y, z)
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
snake_case = ab[1] * ac[2] - ab[2] * ac[1] # *i
snake_case = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
snake_case = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
return tuple(round(UpperCamelCase_ ,UpperCamelCase_ ) for x in vector ) == (0, 0, 0)
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ = 10 ):
"""simple docstring"""
snake_case = create_vector(UpperCamelCase_ ,UpperCamelCase_ )
snake_case = create_vector(UpperCamelCase_ ,UpperCamelCase_ )
return is_zero_vector(get_ad_vectors_cross(UpperCamelCase_ ,UpperCamelCase_ ) ,UpperCamelCase_ )
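# Self-contained check of the zero-cross-product criterion above: the cross
# product of AB and AC vanishes exactly when the three points are collinear.
pa, pb, pc = (0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (2.0, 0.0, 0.0)
ab = tuple(q - p for p, q in zip(pa, pb))
ac = tuple(q - p for p, q in zip(pa, pc))
cross = (
    ab[1] * ac[2] - ab[2] * ac[1],
    ab[2] * ac[0] - ab[0] * ac[2],
    ab[0] * ac[1] - ab[1] * ac[0],
)
print(all(round(x, 10) == 0 for x in cross))  # True: the points are collinear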
| 127 | 0 |
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = ['''sentencepiece''']
def __init__( self,*__lowerCamelCase,**__lowerCamelCase ):
requires_backends(self,['''sentencepiece'''] )
| 39 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        # keep spaces, and keep each alphabetic character only once
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # invert the map so ciphertext letters resolve back to plaintext letters
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D: ").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
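# Worked example (editorial sketch, not part of the original module): with the
# key "marvin", remove_duplicates keeps "MARVIN", the map starts A->M, B->A,
# C->R, D->V, E->I, F->N, and the remaining letters fill in order while
# skipping letters already used, so:
#   encipher("HELLO", create_cipher_map("marvin"))  # -> "CIGGK"
#   decipher("CIGGK", create_cipher_map("marvin"))  # -> "HELLO"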
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 39 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_longt5"""] = [
        """LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LongT5EncoderModel""",
        """LongT5ForConditionalGeneration""",
        """LongT5Model""",
        """LongT5PreTrainedModel""",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_flax_longt5"""] = [
        """FlaxLongT5ForConditionalGeneration""",
        """FlaxLongT5Model""",
        """FlaxLongT5PreTrainedModel""",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
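# Editorial note: with this lazy pattern, importing the package stays cheap;
# the torch/flax submodules registered above are only imported when a listed
# attribute (e.g. LongT5Model) is first accessed through the _LazyModule proxy.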
| 203 |
"""simple docstring"""
def solution() -> int:
    """Product of digits d1 * d10 * d100 * ... * d1000000 of Champernowne's constant."""
    # `constant` collects the decimal representations of 1, 2, 3, ...; one
    # million entries comfortably cover the first 1,000,000 digits
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[9_9999])
        * int(constant[99_9999])
    )
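# Sanity note (editorial): the concatenation begins "123456789101112...", so
# constant[0] == "1" and constant[9] == "1" (the first digit of 10); the full
# product is the Project Euler 40 answer, 210.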
if __name__ == "__main__":
print(solution())
| 203 | 1 |
'''simple docstring'''
def remove_duplicates(key: str) -> str:
    """simple docstring"""
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """simple docstring"""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """simple docstring"""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """simple docstring"""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """simple docstring"""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D: ").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 352 |
'''simple docstring'''
import math
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
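# Quick check (editorial): for n = 10 the square of the sum is 55**2 = 3025,
# the sum of the squares is 385, and the difference is 2640 (Project Euler 6).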
if __name__ == "__main__":
print(f'{solution() = }')
| 52 | 0 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    # n is pentagonal iff m = (1 + sqrt(1 + 24n)) / 6 is a whole number,
    # obtained by solving m(3m - 1)/2 = n for m
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
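# Derivation note (editorial): P(m) = m(3m - 1)/2, so 3m**2 - m - 2n = 0 and
# m = (1 + sqrt(1 + 24n)) / 6; n is pentagonal exactly when that m is a
# positive integer. The first qualifying difference found this way is 5482660.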
if __name__ == "__main__":
print(f"""{solution() = }""")
| 34 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
A =logging.get_logger(__name__)
class _a ( __a ):
__a : str = ["""pixel_values"""]
def __init__( self : Optional[int] , lowercase : bool = True , lowercase : Optional[Dict[str, int]] = None , lowercase : PILImageResampling = PILImageResampling.BILINEAR , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : bool = True , lowercase : Union[int, float] = 1 / 255 , lowercase : bool = True , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , **lowercase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**lowercase )
UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256}
UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase )
UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = resample
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : PILImageResampling = PILImageResampling.BICUBIC , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Any , ):
'''simple docstring'''
UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
UpperCAmelCase = get_resize_output_image_size(lowercase , size=size['''shortest_edge'''] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def A ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : int , ):
'''simple docstring'''
UpperCAmelCase = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(lowercase , size=(size['''height'''], size['''width''']) , data_format=lowercase , **lowercase )
def A ( self : Tuple , lowercase : np.ndarray , lowercase : float , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : List[str] ):
'''simple docstring'''
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A ( self : Optional[int] , lowercase : np.ndarray , lowercase : Union[float, List[float]] , lowercase : Union[float, List[float]] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Union[str, Any] , ):
'''simple docstring'''
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def A ( self : Optional[int] , lowercase : ImageInput , lowercase : Optional[bool] = None , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = None , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : Optional[bool] = None , lowercase : Optional[float] = None , lowercase : Optional[bool] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowercase : Dict , ):
'''simple docstring'''
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(lowercase , default_to_square=lowercase )
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase = get_size_dict(lowercase , param_name='''crop_size''' )
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase = [to_numpy_array(lowercase ) for image in images]
if do_resize:
UpperCAmelCase = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
UpperCAmelCase = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
UpperCAmelCase = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
UpperCAmelCase = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
UpperCAmelCase = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
def A ( self : Tuple , lowercase : str , lowercase : List[Tuple] = None ):
'''simple docstring'''
UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase ) != len(lowercase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowercase ):
UpperCAmelCase = target_sizes.numpy()
UpperCAmelCase = []
for idx in range(len(lowercase ) ):
UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowercase )
UpperCAmelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase )
else:
UpperCAmelCase = logits.argmax(dim=1 )
UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
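    # Usage sketch (editorial, argument names assumed): given segmentation model
    # `outputs` and `target_sizes=[(h, w), ...]`, the method above upsamples the
    # logits bilinearly per image and returns one argmax label map per input;
    # without target_sizes it returns the batched argmax at logit resolution.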
| 34 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : int = 1
_lowercase : int = 3
_lowercase : List[Any] = (32, 32)
_lowercase : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_UpperCamelCase )
return image
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_lowercase : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_lowercase : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_lowercase : Tuple = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(_UpperCamelCase )
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
def extract(*_UpperCamelCase , **_UpperCamelCase ):
class a__ :
def __init__( self ):
"""simple docstring"""
_lowercase : int = torch.ones([0] )
def _lowerCamelCase ( self , _UpperCamelCase ):
"""simple docstring"""
self.pixel_values.to(_UpperCamelCase )
return self
return Out()
return extract
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowercase : Optional[Any] = self.dummy_cond_unet
_lowercase : List[str] = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
_lowercase : Optional[int] = self.dummy_vae
_lowercase : Tuple = self.dummy_text_encoder
_lowercase : List[str] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_lowercase : Dict = 77
_lowercase : int = self.dummy_image.to(_UpperCamelCase )
_lowercase : int = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_lowercase : Optional[int] = AltDiffusionImgaImgPipeline(
unet=_UpperCamelCase , scheduler=_UpperCamelCase , vae=_UpperCamelCase , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase , safety_checker=_UpperCamelCase , feature_extractor=self.dummy_extractor , )
_lowercase : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_UpperCamelCase )
_lowercase : Dict = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
_lowercase : Optional[Any] = "A painting of a squirrel eating a burger"
_lowercase : str = torch.Generator(device=_UpperCamelCase ).manual_seed(0 )
_lowercase : str = alt_pipe(
[prompt] , generator=_UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=_UpperCamelCase , )
_lowercase : Any = output.images
_lowercase : Dict = torch.Generator(device=_UpperCamelCase ).manual_seed(0 )
_lowercase : Union[str, Any] = alt_pipe(
[prompt] , generator=_UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
_lowercase : Optional[Any] = image[0, -3:, -3:, -1]
_lowercase : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase : List[str] = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[Any] = self.dummy_cond_unet
_lowercase : Optional[int] = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
_lowercase : Optional[Any] = self.dummy_vae
_lowercase : List[str] = self.dummy_text_encoder
_lowercase : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
_lowercase : List[str] = 77
_lowercase : List[Any] = self.dummy_image.to(_UpperCamelCase )
# put models in fp16
_lowercase : Optional[Any] = unet.half()
_lowercase : List[Any] = vae.half()
_lowercase : Union[str, Any] = bert.half()
# make sure here that pndm scheduler skips prk
_lowercase : List[Any] = AltDiffusionImgaImgPipeline(
unet=_UpperCamelCase , scheduler=_UpperCamelCase , vae=_UpperCamelCase , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase , safety_checker=_UpperCamelCase , feature_extractor=self.dummy_extractor , )
_lowercase : Optional[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_UpperCamelCase )
_lowercase : Tuple = alt_pipe.to(_UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCamelCase )
_lowercase : str = "A painting of a squirrel eating a burger"
_lowercase : Any = torch.manual_seed(0 )
_lowercase : Tuple = alt_pipe(
[prompt] , generator=_UpperCamelCase , num_inference_steps=2 , output_type="np" , image=_UpperCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
_lowercase : Optional[Any] = init_image.resize((760, 504) )
_lowercase : Any = "BAAI/AltDiffusion"
_lowercase : Optional[int] = AltDiffusionImgaImgPipeline.from_pretrained(
_UpperCamelCase , safety_checker=_UpperCamelCase , )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing()
_lowercase : Dict = "A fantasy landscape, trending on artstation"
_lowercase : Any = torch.manual_seed(0 )
_lowercase : int = pipe(
prompt=_UpperCamelCase , image=_UpperCamelCase , strength=0.7_5 , guidance_scale=7.5 , generator=_UpperCamelCase , output_type="np" , )
_lowercase : Optional[Any] = output.images[0]
_lowercase : Tuple = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
_lowercase : Optional[int] = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
_lowercase : str = init_image.resize((768, 512) )
_lowercase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
_lowercase : List[Any] = "BAAI/AltDiffusion"
_lowercase : Optional[int] = AltDiffusionImgaImgPipeline.from_pretrained(
_UpperCamelCase , safety_checker=_UpperCamelCase , )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing()
_lowercase : Optional[Any] = "A fantasy landscape, trending on artstation"
_lowercase : List[Any] = torch.manual_seed(0 )
_lowercase : Optional[Any] = pipe(
prompt=_UpperCamelCase , image=_UpperCamelCase , strength=0.7_5 , guidance_scale=7.5 , generator=_UpperCamelCase , output_type="np" , )
_lowercase : str = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 199 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class a__ ( lowerCamelCase_ ):
_SCREAMING_SNAKE_CASE : Any = ['image_processor', 'tokenizer']
_SCREAMING_SNAKE_CASE : Dict = 'BlipImageProcessor'
_SCREAMING_SNAKE_CASE : Dict = 'AutoTokenizer'
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
super().__init__(_UpperCamelCase , _UpperCamelCase )
# add QFormer tokenizer
_lowercase : List[Any] = qformer_tokenizer
def __call__( self , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
_lowercase : str = BatchFeature()
if text is not None:
_lowercase : Dict = self.tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
encoding.update(_UpperCamelCase )
            qformer_text_encoding = self.qformer_tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
if images is not None:
_lowercase : List[Any] = self.image_processor(_UpperCamelCase , return_tensors=_UpperCamelCase )
encoding.update(_UpperCamelCase )
return encoding
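    # Editorial note (key names as in the upstream InstructBLIP processor): the
    # same text is tokenized twice -- once with the language-model tokenizer and
    # once with the Q-Former tokenizer -- and the Q-Former ids/mask are exposed
    # under dedicated keys so they do not collide with input_ids/attention_mask.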
def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Dict = self.tokenizer.model_input_names
_lowercase : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _lowerCamelCase ( self , _UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
if os.path.isfile(_UpperCamelCase ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
_lowercase : Union[str, Any] = os.path.join(_UpperCamelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(_UpperCamelCase )
return super().save_pretrained(_UpperCamelCase , **_UpperCamelCase )
@classmethod
def _lowerCamelCase ( cls , _UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
_lowercase : List[Any] = AutoTokenizer.from_pretrained(_UpperCamelCase , subfolder="qformer_tokenizer" )
_lowercase : Optional[Any] = cls._get_arguments_from_pretrained(_UpperCamelCase , **_UpperCamelCase )
args.append(_UpperCamelCase )
return cls(*_UpperCamelCase )
| 199 | 1 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Optional[int] = ["pixel_values"]
def __init__( self , a = True , a = None , a = PILImageResampling.BICUBIC , a = True , a = None , a = True , a = 1 / 2_5_5 , a = True , a = IMAGENET_DEFAULT_MEAN , a = IMAGENET_DEFAULT_STD , **a , ) -> None:
super().__init__(**a )
lowercase__ : int = size if size is not None else {'shortest_edge': 2_2_4}
lowercase__ : List[Any] = get_size_dict(a , default_to_square=a )
lowercase__ : Tuple = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
lowercase__ : int = get_size_dict(a , param_name='crop_size' )
lowercase__ : int = do_resize
lowercase__ : Optional[Any] = size
lowercase__ : int = resample
lowercase__ : Union[str, Any] = do_center_crop
lowercase__ : str = crop_size
lowercase__ : Union[str, Any] = do_rescale
lowercase__ : Optional[int] = rescale_factor
lowercase__ : Tuple = do_normalize
lowercase__ : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowercase__ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _UpperCAmelCase ( self , a , a , a = PILImageResampling.BICUBIC , a = None , **a , ) -> np.ndarray:
lowercase__ : int = get_size_dict(a , default_to_square=a )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
lowercase__ : int = int((2_5_6 / 2_2_4) * size['shortest_edge'] )
lowercase__ : Tuple = get_resize_output_image_size(a , size=a , default_to_square=a )
lowercase__ : Any = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
a , size=(size_dict['height'], size_dict['width']) , resample=a , data_format=a , **a )
def _UpperCAmelCase ( self , a , a , a = None , **a , ) -> np.ndarray:
lowercase__ : Optional[Any] = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(a , size=(size['height'], size['width']) , data_format=a , **a )
def _UpperCAmelCase ( self , a , a , a = None , **a , ) -> np.ndarray:
return rescale(a , scale=a , data_format=a , **a )
def _UpperCAmelCase ( self , a , a , a , a = None , **a , ) -> np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def _UpperCAmelCase ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ) -> BatchFeature:
lowercase__ : int = do_resize if do_resize is not None else self.do_resize
lowercase__ : Optional[Any] = resample if resample is not None else self.resample
lowercase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Any = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : str = image_mean if image_mean is not None else self.image_mean
lowercase__ : Any = image_std if image_std is not None else self.image_std
lowercase__ : Dict = size if size is not None else self.size
lowercase__ : Tuple = get_size_dict(a , default_to_square=a )
lowercase__ : Optional[Any] = crop_size if crop_size is not None else self.crop_size
lowercase__ : Tuple = get_size_dict(a , param_name='crop_size' )
lowercase__ : List[str] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowercase__ : Optional[Any] = [to_numpy_array(a ) for image in images]
if do_resize:
lowercase__ : Optional[int] = [self.resize(a , a , a ) for image in images]
if do_center_crop:
lowercase__ : List[Any] = [self.center_crop(a , a ) for image in images]
if do_rescale:
lowercase__ : Optional[Any] = [self.rescale(a , a ) for image in images]
if do_normalize:
lowercase__ : Dict = [self.normalize(a , a , a ) for image in images]
lowercase__ : List[Any] = [to_channel_dimension_format(a , a ) for image in images]
lowercase__ : Any = {'pixel_values': images}
return BatchFeature(data=a , tensor_type=a )
| 77 |
"""simple docstring"""
A__ : Optional[int] = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
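# Editorial sketch (hypothetical helper, not part of the original file): a pin
# table like this is typically consumed by a small lookup so install extras can
# be assembled by package name, e.g.:
#
#     def deps_list(*pkgs):
#         return [A__[p] for p in pkgs]   # `A__` is the dict above
#
#     deps_list("torch", "tokenizers")
#     # -> ['torch>=1.9,!=1.12.0', 'tokenizers>=0.11.1,!=0.11.3,<0.14']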
| 144 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : Dict , _A : List[Any] , _A : str=13 , _A : Optional[Any]=7 , _A : Tuple=True , _A : Optional[int]=True , _A : List[Any]=True , _A : Dict=True , _A : str=99 , _A : Optional[Any]=32 , _A : Any=5 , _A : List[str]=4 , _A : List[Any]=37 , _A : List[Any]="gelu" , _A : Optional[Any]=0.1 , _A : int=0.1 , _A : List[str]=512 , _A : List[Any]=16 , _A : Union[str, Any]=2 , _A : str=0.02 , _A : Tuple=False , _A : Tuple=True , _A : Dict="None" , _A : Optional[int]=3 , _A : Dict=4 , _A : str=None , ) -> Any:
__magic_name__ : Union[str, Any] = parent
__magic_name__ : Optional[Any] = batch_size
__magic_name__ : Union[str, Any] = seq_length
__magic_name__ : int = is_training
__magic_name__ : str = use_input_mask
__magic_name__ : List[Any] = use_token_type_ids
__magic_name__ : int = use_labels
__magic_name__ : Optional[int] = vocab_size
__magic_name__ : List[Any] = hidden_size
__magic_name__ : int = num_hidden_layers
__magic_name__ : Union[str, Any] = num_attention_heads
__magic_name__ : Union[str, Any] = intermediate_size
__magic_name__ : List[str] = hidden_act
__magic_name__ : Dict = hidden_dropout_prob
__magic_name__ : str = attention_probs_dropout_prob
__magic_name__ : Any = max_position_embeddings
__magic_name__ : Dict = type_vocab_size
__magic_name__ : Optional[int] = type_sequence_label_size
__magic_name__ : Dict = initializer_range
__magic_name__ : Dict = num_labels
__magic_name__ : Dict = num_choices
__magic_name__ : Any = relative_attention
__magic_name__ : str = position_biased_input
__magic_name__ : List[Any] = pos_att_type
__magic_name__ : List[Any] = scope
def __lowerCAmelCase ( self : int ) -> Any:
__magic_name__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : List[Any] = None
if self.use_input_mask:
__magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__magic_name__ : Tuple = None
if self.use_token_type_ids:
__magic_name__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : int = None
__magic_name__ : int = None
__magic_name__ : int = None
if self.use_labels:
__magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __lowerCAmelCase ( self : Any , _A : Optional[int] ) -> str:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __lowerCAmelCase ( self : List[str] , _A : Any , _A : Union[str, Any] , _A : Any , _A : Optional[int] , _A : Union[str, Any] , _A : int , _A : str ) -> Optional[int]:
__magic_name__ : int = DebertaVaModel(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A )[0]
__magic_name__ : Union[str, Any] = model(_A , token_type_ids=_A )[0]
__magic_name__ : str = model(_A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any] , _A : Any , _A : str , _A : Any , _A : Optional[int] , _A : str , _A : Any ) -> int:
__magic_name__ : List[Any] = DebertaVaForMaskedLM(config=_A )
model.to(_A )
model.eval()
__magic_name__ : int = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : Union[str, Any] , _A : Dict , _A : Optional[Any] , _A : Any , _A : Tuple , _A : Dict , _A : List[str] , _A : Union[str, Any] ) -> Optional[Any]:
__magic_name__ : Optional[int] = self.num_labels
__magic_name__ : Tuple = DebertaVaForSequenceClassification(_A )
model.to(_A )
model.eval()
__magic_name__ : List[Any] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_A )
def __lowerCAmelCase ( self : int , _A : Union[str, Any] , _A : int , _A : List[str] , _A : Tuple , _A : str , _A : List[str] , _A : Tuple ) -> int:
__magic_name__ : Union[str, Any] = self.num_labels
__magic_name__ : Tuple = DebertaVaForTokenClassification(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Union[str, Any] , _A : Dict , _A : Union[str, Any] , _A : Any , _A : Tuple , _A : List[Any] , _A : int , _A : List[str] ) -> Any:
__magic_name__ : List[str] = DebertaVaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Union[str, Any] = model(
_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Optional[Any] , _A : int , _A : Optional[Any] , _A : Any , _A : str , _A : Any , _A : Tuple , _A : Tuple ) -> Dict:
__magic_name__ : str = DebertaVaForMultipleChoice(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ : List[Any] = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Tuple = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ : Tuple = (
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Optional[int] = True
A_ : Optional[Any] = False
A_ : Any = False
A_ : List[Any] = False
A_ : int = False
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
__magic_name__ : List[Any] = DebertaVaModelTester(self )
__magic_name__ : Optional[Any] = ConfigTester(self , config_class=_A , hidden_size=37 )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : List[str] ) -> Dict:
__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_A )
def __lowerCAmelCase ( self : int ) -> str:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_A )
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_A )
def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
__magic_name__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*_A )
@slow
def __lowerCAmelCase ( self : int ) -> Dict:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Optional[Any] = DebertaVaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='Model not available yet' )
def __lowerCAmelCase ( self : str ) -> List[Any]:
pass
@slow
def __lowerCAmelCase ( self : Optional[Any] ) -> int:
__magic_name__ : Union[str, Any] = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
__magic_name__ : str = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__magic_name__ : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__magic_name__ : str = model(_A , attention_mask=_A )[0]
# compare the actual values for a slice.
__magic_name__ : int = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
 | 275 |
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    """An automorphic number's square ends in the number itself (5 -> 25, 76 -> 5776)."""
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # compare the trailing digits of the square with the digits of the number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
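# Quick examples (editorial): 5**2 = 25 and 76**2 = 5776 both end in the
# original number, so is_automorphic_number returns True for 5 and 76, while
# 7**2 = 49 does not end in 7, so 7 returns False.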
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 275 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    # replace the last `occurrence` matches of `old`, scanning from the right
    li = s.rsplit(old, occurrence)
    return new.join(li)
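# Example (editorial): rreplace("encoder.blocks.0.w", ".w", ".weight", 1)
# right-splits into ["encoder.blocks.0", ""] and rejoins with the replacement,
# giving "encoder.blocks.0.weight".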
def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint
    return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}
    group_keys = ["""group_1""", """group_2""", """group_3""", """group_4"""]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"""{group_key}.""", f"""{group_key}.group.""")
        if "res_path" in key:
            key = key.replace("""res_path.""", """res_path.path.""")
        if key.endswith(""".w"""):
            key = rreplace(key, """.w""", """.weight""", 1)
        if key.endswith(""".b"""):
            key = rreplace(key, """.b""", """.bias""", 1)
        upgrade[key] = value.float()
    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)
    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    # the parameter sums should agree up to float-casting noise
    assert torch.allclose(hf_count, state_dict_count, atol=1E-3)
    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 347 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : int = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "encodec"
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Tuple=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCAmelCase_ : Tuple=2_4_0_0_0 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Dict=1_2_8 , lowerCAmelCase_ : Dict=3_2 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : Dict=[8, 5, 4, 2] , lowerCAmelCase_ : Optional[Any]="weight_norm" , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : int=7 , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : int="reflect" , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : List[Any]=1.0 , lowerCAmelCase_ : Dict=1_0_2_4 , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[int]=True , **lowerCAmelCase_ : List[str] , ):
"""simple docstring"""
lowercase_ = target_bandwidths
lowercase_ = sampling_rate
lowercase_ = audio_channels
lowercase_ = normalize
lowercase_ = chunk_length_s
lowercase_ = overlap
lowercase_ = hidden_size
lowercase_ = num_filters
lowercase_ = num_residual_layers
lowercase_ = upsampling_ratios
lowercase_ = norm_type
lowercase_ = kernel_size
lowercase_ = last_kernel_size
lowercase_ = residual_kernel_size
lowercase_ = dilation_growth_rate
lowercase_ = use_causal_conv
lowercase_ = pad_mode
lowercase_ = compress
lowercase_ = num_lstm_layers
lowercase_ = trim_right_ratio
lowercase_ = codebook_size
lowercase_ = codebook_dim if codebook_dim is not None else hidden_size
lowercase_ = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''')
super().__init__(**lowerCAmelCase_)
@property
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate)
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length))
@property
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
        hop_length = np.prod(self.upsampling_ratios)
return math.ceil(self.sampling_rate / hop_length)
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0))
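    # Editorial note: the last property derives the number of residual-VQ
    # codebooks from the highest target bandwidth: with the default 1024-entry
    # codebooks each quantizer emits log2(1024) = 10 bits per frame, hence
    # kbps * 1000 // (frame_rate * 10).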
| 136 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.0_0_2,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 201,
"sigma_min": 0.0_0_2,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 151,
"sigma_min": 0.0_0_2,
"sigma_max": 80.0,
}
def str2bool(v):
    '''simple docstring'''
    if isinstance(v, bool):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("""boolean value expected""" )
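# Editorial note: the standard argparse bool shim -- str2bool(True) -> True,
# str2bool("yes") -> True, str2bool("0") -> False, and anything else raises
# argparse.ArgumentTypeError.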
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    '''simple docstring'''
    new_checkpoint[f'{new_prefix}.norm1.weight'] = checkpoint[f'{old_prefix}.in_layers.0.weight']
    new_checkpoint[f'{new_prefix}.norm1.bias'] = checkpoint[f'{old_prefix}.in_layers.0.bias']
    new_checkpoint[f'{new_prefix}.conv1.weight'] = checkpoint[f'{old_prefix}.in_layers.2.weight']
    new_checkpoint[f'{new_prefix}.conv1.bias'] = checkpoint[f'{old_prefix}.in_layers.2.bias']
    new_checkpoint[f'{new_prefix}.time_emb_proj.weight'] = checkpoint[f'{old_prefix}.emb_layers.1.weight']
    new_checkpoint[f'{new_prefix}.time_emb_proj.bias'] = checkpoint[f'{old_prefix}.emb_layers.1.bias']
    new_checkpoint[f'{new_prefix}.norm2.weight'] = checkpoint[f'{old_prefix}.out_layers.0.weight']
    new_checkpoint[f'{new_prefix}.norm2.bias'] = checkpoint[f'{old_prefix}.out_layers.0.bias']
    new_checkpoint[f'{new_prefix}.conv2.weight'] = checkpoint[f'{old_prefix}.out_layers.3.weight']
    new_checkpoint[f'{new_prefix}.conv2.bias'] = checkpoint[f'{old_prefix}.out_layers.3.bias']
    if has_skip:
        new_checkpoint[f'{new_prefix}.conv_shortcut.weight'] = checkpoint[f'{old_prefix}.skip_connection.weight']
        new_checkpoint[f'{new_prefix}.conv_shortcut.bias'] = checkpoint[f'{old_prefix}.skip_connection.bias']
    return new_checkpoint
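# Mapping recap (editorial, diffusers-side names as in ResnetBlock2D):
# in_layers.0/.2 -> norm1/conv1, emb_layers.1 -> time_emb_proj,
# out_layers.0/.3 -> norm2/conv2, skip_connection -> conv_shortcut.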
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
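# Shape sketch for the attention conversion (assumes `torch` is imported at the
# top of this script, which `torch.load` below already requires): the original
# checkpoints store qkv as one fused 1x1 convolution of shape (3*C, C, 1, 1),
# while diffusers attention expects nn.Linear weights of shape (C, C) — hence
# chunk(3, dim=0) followed by squeezing the two trailing conv dimensions.
_w = torch.randn(3 * 8, 8, 1, 1)  # toy fused qkv conv weight with C=8
_q, _k, _v = _w.chunk(3, dim=0)
assert _q.squeeze(-1).squeeze(-1).shape == (8, 8)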
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
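# Key-naming summary of the mapping above (commentary only): the original
# checkpoints index blocks linearly as input_blocks.N / middle_block.N /
# output_blocks.N, with the resnet at sub-index .0 and the attention at .1,
# while diffusers addresses the same weights hierarchically as
# down_blocks.{i}.resnets.{j}, down_blocks.{i}.attentions.{j}, mid_block.*,
# up_blocks.{i}.*; `current_layer` walks the linear index in lockstep.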
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
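    # Example invocation (file and directory names are hypothetical):
    #
    #     python convert_consistency_to_diffusers.py \
    #         --unet_path cd_imagenet64_lpips.pt \
    #         --dump_path ./cd_imagenet64 \
    #         --class_cond True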
| 351 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Helper function to read an audio file through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
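# Usage sketch (the file path is hypothetical): decode any ffmpeg-readable
# container into a mono float32 waveform at the requested rate.
#
#     with open("sample.flac", "rb") as f:
#         audio = ffmpeg_read(f.read(), sampling_rate=16000)
#     # audio is a 1-D np.float32 array sampled at 16 kHz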
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read raw microphone data.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read audio from the microphone, chunked with strides for streaming inference.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
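# Consumption sketch (assumes ffmpeg on PATH and a working microphone; `process`
# is a hypothetical callback):
#
#     for chunk in ffmpeg_microphone_live(16000, chunk_length_s=5.0, stream_chunk_s=1.0):
#         if not chunk.get("partial", False):
#             process(chunk["raw"])  # a full 5 s window, ready for inference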
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and yields chunks of length `chunk_len`, overlapped by `stride` bytes.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
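# Worked example of the striding (kept as a comment so it does not run on import):
#
#     list(chunk_bytes_iter(iter([b"abcdefgh"]), 6, stride=(2, 2)))
#     # -> [{'raw': b'abcdef', 'stride': (0, 2)},
#     #     {'raw': b'cdefgh', 'stride': (2, 2)},
#     #     {'raw': b'efgh', 'stride': (2, 0)}]
#
# Consecutive chunks re-read `stride` bytes from their neighbours so downstream
# models see context on both sides; the stride tuple tells the consumer how much
# of each chunk is overlap rather than new signal.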
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """
    Internal function to create the generator of data through ffmpeg.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 89 | 0 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb):
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray):
    return (gray > 127) & (gray <= 255)
def dilation(image, kernel):
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
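# Cross-check sketch (assumes scipy is available; this script does not depend on it):
#
#     from scipy.ndimage import binary_dilation
#     ref = binary_dilation(binary_img, structure=structuring_element)
#
# For a binary input and the same structuring element, `dilation` above should
# agree with scipy's `binary_dilation` away from the border padding.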
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 78 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    r"""
    Constructs a LayoutXLM processor which combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a
    single processor, so document images and (optionally pre-tokenized) text can be prepared in one call.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs
        )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
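# Minimal usage sketch (checkpoint name is illustrative; built-in OCR requires
# pytesseract):
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(image, return_tensors="pt")
#     # encoding carries input_ids, bbox, attention_mask and the resized image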
| 78 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
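# Usage sketch (dataset and column names are hypothetical): align_with_features
# swaps the generic ClassLabel placeholder for the dataset's concrete label
# feature, so the task's label schema matches the actual data.
#
#     template = AudioClassification(audio_column="audio", label_column="genre")
#     template = template.align_with_features(ds.features)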
| 276 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )
    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
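# How the checker is driven (commentary, not part of the test): utils/check_copies.py
# scans source files for markers of the form
#
#     # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel
#
# blackens both the original and the copy, applies the `with X->Y` renames, and
# either reports an inconsistency or rewrites the copy in place when invoked with
# --fix_and_overwrite.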
| 276 | 1 |