code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
_UpperCamelCase: Optional[int] = True
except (ImportError, ModuleNotFoundError):
_UpperCamelCase: Union[str, Any] = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowercase__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
re.sub('<n>' , '' , __a ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) )
| 255 |
'''simple docstring'''
def a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = []
UpperCamelCase__ :int = 1
while len(__a ) < 1e6:
constant.append(str(__a ) )
i += 1
UpperCamelCase__ :Union[str, Any] = ''''''.join(__a )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
if __name__ == "__main__":
print(solution()) | 97 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class _UpperCAmelCase ( A__ ):
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =42
lowerCamelCase__ =42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 102 |
'''simple docstring'''
from PIL import Image
def a ( __a , __a ) -> Image:
'''simple docstring'''
def brightness(__a ) -> float:
return 128 + level + (c - 128)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__a )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
__snake_case = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''') | 97 | 0 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
A__ : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase__ ( A__ ):
def __init__( self : str , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[int] , ):
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=UpperCamelCase_ , speech_processor=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , )
def UpperCAmelCase__ ( self : int , snake_case__ : int = "auto" ):
if slice_size == "auto":
lowerCamelCase_ : int =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase_ )
def UpperCAmelCase__ ( self : Union[str, Any] ):
self.enable_attention_slicing(UpperCamelCase_ )
@torch.no_grad()
def __call__( self : Tuple , snake_case__ : List[str] , snake_case__ : List[Any]=1_6000 , snake_case__ : List[str] = 512 , snake_case__ : Dict = 512 , snake_case__ : str = 50 , snake_case__ : Dict = 7.5 , snake_case__ : Dict = None , snake_case__ : List[Any] = 1 , snake_case__ : Any = 0.0 , snake_case__ : int = None , snake_case__ : Optional[int] = None , snake_case__ : Union[str, Any] = "pil" , snake_case__ : Tuple = True , snake_case__ : Optional[Any] = None , snake_case__ : Optional[Any] = 1 , **snake_case__ : List[Any] , ):
lowerCamelCase_ : int =self.speech_processor.feature_extractor(
UpperCamelCase_ , return_tensors="pt" , sampling_rate=UpperCamelCase_ ).input_features.to(self.device )
lowerCamelCase_ : List[str] =self.speech_model.generate(UpperCamelCase_ , max_length=48_0000 )
lowerCamelCase_ : Optional[Any] =self.speech_processor.tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , normalize=UpperCamelCase_ )[
0
]
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCamelCase_ : Dict =1
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCamelCase_ : Optional[Any] =len(UpperCamelCase_ )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase_ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(UpperCamelCase_ )}.""" )
# get prompt text embeddings
lowerCamelCase_ : Optional[int] =self.tokenizer(
UpperCamelCase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
lowerCamelCase_ : List[Any] =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase_ : Optional[int] =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowerCamelCase_ : Tuple =text_input_ids[:, : self.tokenizer.model_max_length]
lowerCamelCase_ : Dict =self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ : Any =text_embeddings.shape
lowerCamelCase_ : Dict =text_embeddings.repeat(1 , UpperCamelCase_ , 1 )
lowerCamelCase_ : Dict =text_embeddings.view(bs_embed * num_images_per_prompt , UpperCamelCase_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ : Dict =guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ : List[str]
if negative_prompt is None:
lowerCamelCase_ : Dict =[''''''] * batch_size
elif type(UpperCamelCase_ ) is not type(UpperCamelCase_ ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(UpperCamelCase_ )} !="""
F""" {type(UpperCamelCase_ )}.""" )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCamelCase_ : Tuple =[negative_prompt]
elif batch_size != len(UpperCamelCase_ ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(UpperCamelCase_ )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
lowerCamelCase_ : int =negative_prompt
lowerCamelCase_ : Optional[Any] =text_input_ids.shape[-1]
lowerCamelCase_ : List[Any] =self.tokenizer(
UpperCamelCase_ , padding="max_length" , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors="pt" , )
lowerCamelCase_ : Optional[int] =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ : Tuple =uncond_embeddings.shape[1]
lowerCamelCase_ : Any =uncond_embeddings.repeat(1 , UpperCamelCase_ , 1 )
lowerCamelCase_ : List[Any] =uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCamelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ : Tuple =torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase_ : Dict =(batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ : int =text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase_ : Optional[int] =torch.randn(UpperCamelCase_ , generator=UpperCamelCase_ , device="cpu" , dtype=UpperCamelCase_ ).to(
self.device )
else:
lowerCamelCase_ : Tuple =torch.randn(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=UpperCamelCase_ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowerCamelCase_ : str =latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCamelCase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCamelCase_ : List[Any] =self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ : List[str] =latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ : Tuple ='''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ : List[Any] ={}
if accepts_eta:
lowerCamelCase_ : List[str] =eta
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ : int =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ : int =self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ )
# predict the noise residual
lowerCamelCase_ : List[Any] =self.unet(UpperCamelCase_ , UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCamelCase_ : List[Any] =noise_pred.chunk(2 )
lowerCamelCase_ : int =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ : Tuple =self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ : Dict =1 / 0.18_215 * latents
lowerCamelCase_ : Dict =self.vae.decode(UpperCamelCase_ ).sample
lowerCamelCase_ : Dict =(image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase_ : List[str] =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase_ : Optional[int] =self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=UpperCamelCase_ , nsfw_content_detected=UpperCamelCase_ )
| 144 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
__snake_case = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def a ( ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = Github(os.environ['''GITHUB_TOKEN'''] )
UpperCamelCase__ :Tuple = g.get_repo('''huggingface/transformers''' )
UpperCamelCase__ :Union[str, Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
UpperCamelCase__ :List[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda __a : i.created_at , reverse=__a )
UpperCamelCase__ :List[Any] = comments[0] if len(__a ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main() | 97 | 0 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : List[str]=1 ) -> int:
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : List[Any]=0 ) -> str:
_a : Optional[int] =[]
for old_item in old_list:
_a : Union[str, Any] =old_item.replace("""in_layers.0""" ,"""norm1""" )
_a : List[str] =new_item.replace("""in_layers.2""" ,"""conv1""" )
_a : Dict =new_item.replace("""out_layers.0""" ,"""norm2""" )
_a : Dict =new_item.replace("""out_layers.3""" ,"""conv2""" )
_a : Any =new_item.replace("""emb_layers.1""" ,"""time_emb_proj""" )
_a : Union[str, Any] =new_item.replace("""skip_connection""" ,"""conv_shortcut""" )
_a : List[str] =shave_segments(__a ,n_shave_prefix_segments=__a )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Any ,_UpperCAmelCase : Union[str, Any]=0 ) -> Dict:
_a : Optional[int] =[]
for old_item in old_list:
_a : Tuple =old_item
_a : Any =new_item.replace("""norm.weight""" ,"""group_norm.weight""" )
_a : List[Any] =new_item.replace("""norm.bias""" ,"""group_norm.bias""" )
_a : Union[str, Any] =new_item.replace("""proj_out.weight""" ,"""proj_attn.weight""" )
_a : Tuple =new_item.replace("""proj_out.bias""" ,"""proj_attn.bias""" )
_a : Optional[int] =shave_segments(__a ,n_shave_prefix_segments=__a )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : str ,_UpperCAmelCase : str ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : List[str]=None ,_UpperCAmelCase : int=None ) -> str:
assert isinstance(__a ,__a ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_a : Any =old_checkpoint[path]
_a : List[str] =old_tensor.shape[0] // 3
_a : Optional[Any] =(-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_a : int =old_tensor.shape[0] // config['''num_head_channels'''] // 3
_a : Dict =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_a : List[Any] =old_tensor.split(channels // num_heads ,dim=1 )
_a : Union[str, Any] =query.reshape(__a )
_a : List[Any] =key.reshape(__a )
_a : Union[str, Any] =value.reshape(__a )
for path in paths:
_a : Tuple =path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_a : Dict =new_path.replace("""middle_block.0""" ,"""mid_block.resnets.0""" )
_a : List[str] =new_path.replace("""middle_block.1""" ,"""mid_block.attentions.0""" )
_a : str =new_path.replace("""middle_block.2""" ,"""mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
_a : Tuple =new_path.replace(replacement["""old"""] ,replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_a : List[str] =old_checkpoint[path['''old''']][:, :, 0]
else:
_a : List[str] =old_checkpoint[path['''old''']]
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Optional[Any] ) -> int:
_a : int ={}
_a : Union[str, Any] =checkpoint['''time_embed.0.weight''']
_a : Tuple =checkpoint['''time_embed.0.bias''']
_a : Union[str, Any] =checkpoint['''time_embed.2.weight''']
_a : Union[str, Any] =checkpoint['''time_embed.2.bias''']
_a : List[str] =checkpoint['''input_blocks.0.0.weight''']
_a : Tuple =checkpoint['''input_blocks.0.0.bias''']
_a : Any =checkpoint['''out.0.weight''']
_a : Dict =checkpoint['''out.0.bias''']
_a : str =checkpoint['''out.2.weight''']
_a : List[Any] =checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
_a : Optional[Any] =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_a : Optional[int] ={
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(__a )
}
# Retrieves the keys for the middle blocks only
_a : int =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_a : int ={
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(__a )
}
# Retrieves the keys for the output blocks only
_a : List[str] =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_a : Any ={
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(__a )
}
for i in range(1 ,__a ):
_a : Optional[int] =(i - 1) // (config['''num_res_blocks'''] + 1)
_a : List[Any] =(i - 1) % (config['''num_res_blocks'''] + 1)
_a : int =[key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
_a : Dict =[key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
_a : List[Any] =checkpoint[
F"input_blocks.{i}.0.op.weight"
]
_a : Optional[Any] =checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
_a : List[Any] =renew_resnet_paths(__a )
_a : Union[str, Any] ={'''old''': F"input_blocks.{i}.0", '''new''': F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
_a : Union[str, Any] ={'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
__a ,__a ,__a ,additional_replacements=[meta_path, resnet_op] ,config=__a )
if len(__a ):
_a : List[Any] =renew_attention_paths(__a )
_a : List[str] ={
'''old''': F"input_blocks.{i}.1",
'''new''': F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_a : Optional[Any] ={
F"input_blocks.{i}.1.qkv.bias": {
'''key''': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
'''query''': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
'''value''': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
'''key''': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
'''query''': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
'''value''': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
__a ,__a ,__a ,additional_replacements=[meta_path] ,attention_paths_to_split=__a ,config=__a ,)
_a : str =middle_blocks[0]
_a : List[Any] =middle_blocks[1]
_a : Optional[int] =middle_blocks[2]
_a : List[str] =renew_resnet_paths(__a )
assign_to_checkpoint(__a ,__a ,__a ,config=__a )
_a : Optional[int] =renew_resnet_paths(__a )
assign_to_checkpoint(__a ,__a ,__a ,config=__a )
_a : List[str] =renew_attention_paths(__a )
_a : int ={
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
__a ,__a ,__a ,attention_paths_to_split=__a ,config=__a )
for i in range(__a ):
_a : List[str] =i // (config['''num_res_blocks'''] + 1)
_a : Optional[Any] =i % (config['''num_res_blocks'''] + 1)
_a : Optional[int] =[shave_segments(__a ,2 ) for name in output_blocks[i]]
_a : Optional[int] ={}
for layer in output_block_layers:
_a : List[Any] =layer.split(""".""" )[0], shave_segments(__a ,1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__a )
else:
_a : Dict =[layer_name]
if len(__a ) > 1:
_a : Dict =[key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
_a : str =[key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
_a : Tuple =renew_resnet_paths(__a )
_a : int =renew_resnet_paths(__a )
_a : Union[str, Any] ={'''old''': F"output_blocks.{i}.0", '''new''': F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(__a ,__a ,__a ,additional_replacements=[meta_path] ,config=__a )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_a : str =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_a : str =checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
_a : List[Any] =checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(__a ) == 2:
_a : str =[]
if len(__a ):
_a : Optional[int] =renew_attention_paths(__a )
_a : Optional[Any] ={
'''old''': F"output_blocks.{i}.1",
'''new''': F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
_a : Optional[int] ={
F"output_blocks.{i}.1.qkv.bias": {
'''key''': F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
'''query''': F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
'''value''': F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
'''key''': F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
'''query''': F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
'''value''': F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
__a ,__a ,__a ,additional_replacements=[meta_path] ,attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None ,config=__a ,)
else:
_a : Dict =renew_resnet_paths(__a ,n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_a : List[Any] ='''.'''.join(["""output_blocks""", str(__a ), path["""old"""]] )
_a : Optional[int] ='''.'''.join(["""up_blocks""", str(__a ), """resnets""", str(__a ), path["""new"""]] )
_a : int =checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
A__: Any = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
A__: List[Any] = parser.parse_args()
A__: str = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
A__: Union[str, Any] = json.loads(f.read())
A__: List[Any] = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
A__: List[str] = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
A__: Optional[Any] = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A__: int = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
A__: int = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 276 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__snake_case = True
except (ImportError, ModuleNotFoundError):
__snake_case = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def a ( __a ) -> str:
'''simple docstring'''
re.sub('''<n>''' , '''''' , __a ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) ) | 97 | 0 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase__:
def __init__( self: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Union[str, Any]=99 , UpperCamelCase_: Any=13 , UpperCamelCase_: List[Any]=16 , UpperCamelCase_: str=7 , UpperCamelCase_: List[str]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: Optional[Any]=False , UpperCamelCase_: Tuple=True , UpperCamelCase_: Tuple=2 , UpperCamelCase_: Optional[int]=32 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: str=4 , UpperCamelCase_: Union[str, Any]=30 , UpperCamelCase_: Dict=0 , UpperCamelCase_: Dict=1 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Union[str, Any]=None , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = decoder_seq_length
# For common tests
__lowerCamelCase = self.decoder_seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = d_model
__lowerCamelCase = d_model
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = eos_token_id
__lowerCamelCase = bos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = decoder_start_token_id
__lowerCamelCase = use_cache
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = None
__lowerCamelCase = decoder_seq_length
__lowerCamelCase = 2
__lowerCamelCase = 1
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_attention_mask:
__lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__lowerCamelCase = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: str , UpperCamelCase_: Optional[Any] , ):
__lowerCamelCase = True
__lowerCamelCase = TrOCRDecoder(config=UpperCamelCase_ ).to(UpperCamelCase_ ).eval()
__lowerCamelCase = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
__lowerCamelCase = model(UpperCamelCase_ , use_cache=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ , use_cache=UpperCamelCase_ )
self.parent.assertTrue(len(UpperCamelCase_ ) == len(UpperCamelCase_ ) )
self.parent.assertTrue(len(UpperCamelCase_ ) == len(UpperCamelCase_ ) + 1 )
__lowerCamelCase = outputs['''past_key_values''']
# create hypothetical next token and extent to next_input_ids
__lowerCamelCase = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
__lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCamelCase = model(UpperCamelCase_ )['''last_hidden_state''']
__lowerCamelCase = model(UpperCamelCase_ , past_key_values=UpperCamelCase_ )['''last_hidden_state''']
# select random slice
__lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCamelCase = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
__lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase = config_and_inputs
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__( A__ , A__ , A__ , unittest.TestCase):
UpperCAmelCase__ : Dict = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
UpperCAmelCase__ : Tuple = (TrOCRForCausalLM,) if is_torch_available() else ()
UpperCAmelCase__ : Tuple = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Dict = False
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = TrOCRStandaloneDecoderModelTester(self , is_training=UpperCamelCase_ )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
pass
def lowerCAmelCase__ ( self: Dict ):
pass
def lowerCAmelCase__ ( self: str ):
pass
def lowerCAmelCase__ ( self: str ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] ):
return
@unittest.skip("""The model doesn\'t support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase__ ( self: Union[str, Any] ):
pass
| 12 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def a ( __a="ro" , __a="en" , __a="wmt16" , __a=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
UpperCamelCase__ :int = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
UpperCamelCase__ :Tuple = datasets.load_dataset(__a , __a )
if save_dir is None:
UpperCamelCase__ :Any = f'''{dataset}-{pair}'''
UpperCamelCase__ :Dict = Path(__a )
save_dir.mkdir(exist_ok=__a )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
UpperCamelCase__ :Dict = '''val''' if split == '''validation''' else split
UpperCamelCase__ :List[Any] = save_dir.joinpath(f'''{fn}.source''' )
UpperCamelCase__ :int = save_dir.joinpath(f'''{fn}.target''' )
UpperCamelCase__ :Union[str, Any] = src_path.open('''w+''' )
UpperCamelCase__ :Tuple = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
UpperCamelCase__ :Union[str, Any] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset) | 97 | 0 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( A__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Dict = CLIPTokenizer
lowerCamelCase__ : List[str] = CLIPTokenizerFast
lowerCamelCase__ : Dict = True
lowerCamelCase__ : Any = {}
lowerCamelCase__ : Optional[Any] = False
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# fmt: off
a__ =['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
a__ =dict(zip(UpperCamelCase_, range(len(UpperCamelCase_ ) ) ) )
a__ =['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''']
a__ ={'''unk_token''': '''<unk>'''}
a__ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
a__ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
with open(self.merges_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase_ ) )
def _UpperCAmelCase ( self, **lowercase_ ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **UpperCamelCase_ )
def _UpperCAmelCase ( self, **lowercase_ ) -> List[str]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **UpperCamelCase_ )
def _UpperCAmelCase ( self, lowercase_ ) -> int:
"""simple docstring"""
a__ ='''lower newer'''
a__ ='''lower newer'''
return input_text, output_text
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
a__ =CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
a__ ='''lower newer'''
a__ =['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>''']
a__ =tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_, UpperCamelCase_ )
a__ =tokens + [tokenizer.unk_token]
a__ =[10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ), UpperCamelCase_ )
@require_ftfy
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a__ =self.tokenizer_class.from_pretrained(UpperCamelCase_, **UpperCamelCase_ )
a__ =self.rust_tokenizer_class.from_pretrained(UpperCamelCase_, **UpperCamelCase_ )
a__ ='''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'''
a__ =tokenizer_s.tokenize(UpperCamelCase_ )
a__ =tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_, UpperCamelCase_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
a__ ='''xa\u0303y''' + ''' ''' + '''x\xe3y'''
a__ =tokenizer_s.tokenize(UpperCamelCase_ )
a__ =tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_, UpperCamelCase_ )
# Test that the tokenization is identical on unicode of space type
a__ =[
'''\u0009''', # (horizontal tab, '\t')
'''\u000B''', # (vertical tab)
'''\u000C''', # (form feed)
'''\u0020''', # (space, ' ')
'''\u200E''', # (left-to-right mark):w
'''\u200F''', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
a__ =tokenizer_s.tokenize(UpperCamelCase_ )
a__ =tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_, UpperCamelCase_ )
# Test that the tokenization is identical on unicode of line break type
a__ =[
'''\u000A''', # (line feed, '\n')
'''\r\n''', # (carriage return and line feed, '\r\n')
'''\u000D''', # (carriage return, '\r')
'''\r''', # (carriage return, '\r')
'''\u000D''', # (carriage return, '\r')
'''\u2028''', # (line separator)
'''\u2029''', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
a__ =tokenizer_s.tokenize(UpperCamelCase_ )
a__ =tokenizer_r.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_, UpperCamelCase_ )
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a__ ='''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
a__ =F"""{text_of_1_token} {text_of_1_token}"""
a__ =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_, use_fast=UpperCamelCase_, )
a__ =tokenizer_r(UpperCamelCase_, return_offsets_mapping=UpperCamelCase_, add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(UpperCamelCase_ ) + 1, len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )), )
a__ =F""" {text}"""
a__ =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase_, use_fast=UpperCamelCase_, )
a__ =tokenizer_r(UpperCamelCase_, return_offsets_mapping=UpperCamelCase_, add_special_tokens=UpperCamelCase_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(UpperCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(UpperCamelCase_ ) + 1, 1 + len(UpperCamelCase_ ) + 1 + len(UpperCamelCase_ )), )
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
with self.assertRaises(UpperCamelCase_ ) as context:
self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' )
self.assertTrue(
context.exception.args[0].startswith(
'''The `backend_tokenizer` provided does not match the expected format.''' ) )
@require_ftfy
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
pass
| 188 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__snake_case = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''DPTFeatureExtractor''']
__snake_case = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 97 | 0 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
_lowerCamelCase : int = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def a_ ( __lowercase : Tuple ) -> Optional[Any]:
if isinstance(__a , torch.Tensor ):
return image
elif isinstance(__a , PIL.Image.Image ):
_snake_case = [image]
_snake_case = [trans(img.convert('RGB' ) ) for img in image]
_snake_case = torch.stack(__a )
return image
class SCREAMING_SNAKE_CASE__ ( A__ ):
'''simple docstring'''
def __init__( self : int , lowercase : Any , lowercase : int ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
_snake_case = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
def A ( self : Optional[Any] , lowercase : int ):
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' )
def A ( self : Dict , lowercase : Optional[Any] , lowercase : Union[str, Any] , lowercase : Any ):
'''simple docstring'''
_snake_case = min(int(num_inference_steps * strength ) , UpperCamelCase_ )
_snake_case = max(num_inference_steps - init_timestep , 0 )
_snake_case = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def A ( self : Optional[Any] , lowercase : List[str] , lowercase : str , lowercase : Any , lowercase : int , lowercase : Any , lowercase : str=None ):
'''simple docstring'''
if not isinstance(UpperCamelCase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCamelCase_ )}''' )
_snake_case = image.to(device=UpperCamelCase_ , dtype=UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
_snake_case = init_latents.shape
_snake_case = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
# get latents
print('add noise to latents at timestep' , UpperCamelCase_ )
_snake_case = self.scheduler.add_noise(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_snake_case = init_latents
return latents
@torch.no_grad()
def __call__( self : List[Any] , lowercase : Optional[Any] = None , lowercase : List[str] = 0.8 , lowercase : int = 1 , lowercase : List[Any] = None , lowercase : Tuple = 0.0 , lowercase : Dict = 50 , lowercase : List[str] = None , lowercase : List[str] = "pil" , lowercase : Tuple = True , ):
'''simple docstring'''
self.check_inputs(UpperCamelCase_ )
# 2. Preprocess image
_snake_case = preprocess(UpperCamelCase_ )
# 3. set timesteps
self.scheduler.set_timesteps(UpperCamelCase_ , device=self.device )
_snake_case = self.get_timesteps(UpperCamelCase_ , UpperCamelCase_ , self.device )
_snake_case = timesteps[:1].repeat(UpperCamelCase_ )
# 4. Prepare latent variables
_snake_case = self.prepare_latents(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.unet.dtype , self.device , UpperCamelCase_ )
_snake_case = latents
# 5. Denoising loop
for t in self.progress_bar(UpperCamelCase_ ):
# 1. predict noise model_output
_snake_case = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_snake_case = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , eta=UpperCamelCase_ , use_clipped_model_output=UpperCamelCase_ , generator=UpperCamelCase_ , ).prev_sample
_snake_case = (image / 2 + 0.5).clamp(0 , 1 )
_snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_snake_case = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=UpperCamelCase_ ) | 282 |
'''simple docstring'''
def a ( __a , __a ) -> int:
'''simple docstring'''
if len(__a ) != len(__a ):
raise ValueError('''String lengths must match!''' )
UpperCamelCase__ :Union[str, Any] = 0
for chara, chara in zip(__a , __a ):
if chara != chara:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 97 | 0 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase : List[Any] =imread(R'''digital_image_processing/image_data/lena_small.jpg''')
lowerCamelCase : List[Any] =cvtColor(img, COLOR_BGR2GRAY)
def SCREAMING_SNAKE_CASE ( ) -> int:
UpperCamelCase__ : List[Any] = cn.convert_to_negative(__a )
# assert negative_img array for at least one True
assert negative_img.any()
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(__a , 110 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
UpperCamelCase__ : List[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def SCREAMING_SNAKE_CASE ( ) -> Dict:
UpperCamelCase__ : int = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
UpperCamelCase__ : Optional[Any] = canny.canny(__a )
# assert canny array for at least one True
assert canny_array.any()
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
assert gg.gaussian_filter(__a , 5 , sigma=0.9 ).all()
def SCREAMING_SNAKE_CASE ( ) -> Any:
UpperCamelCase__ : Optional[int] = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] )
UpperCamelCase__ : Dict = conv.img_convolve(__a , __a ).astype(__a )
assert res.any()
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
assert med.median_filter(__a , 3 ).any()
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
UpperCamelCase__ : Dict = sob.sobel_filter(__a )
assert grad.any() and theta.any()
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
UpperCamelCase__ : str = sp.make_sepia(__a , 20 )
assert sepia.all()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = "digital_image_processing/image_data/lena_small.jpg" ) -> List[str]:
UpperCamelCase__ : List[Any] = bs.Burkes(imread(__a , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = "digital_image_processing/image_data/lena_small.jpg" , ) -> List[Any]:
UpperCamelCase__ : int = rs.NearestNeighbour(imread(__a , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
UpperCamelCase__ : Dict = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
UpperCamelCase__ : str = imread(__a , 0 )
# Test for get_neighbors_pixel function() return not None
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : str = 0
UpperCamelCase__ : Tuple = image[x_coordinate][y_coordinate]
UpperCamelCase__ : int = lbp.get_neighbors_pixel(
__a , __a , __a , __a )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
UpperCamelCase__ : str = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
UpperCamelCase__ : List[Any] = lbp.local_binary_value(__a , __a , __a )
assert lbp_image.any() | 189 |
'''simple docstring'''
def a ( __a ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
UpperCamelCase__ :Optional[Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
UpperCamelCase__ :int = 1
if upper_limit > 0:
UpperCamelCase__ :int = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(__a ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
__snake_case = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod() | 97 | 0 |
'''simple docstring'''
import math
def A_ ( snake_case , snake_case = 0 , snake_case = 0 ):
SCREAMING_SNAKE_CASE:Dict = end or len(__a )
for i in range(__a , __a ):
SCREAMING_SNAKE_CASE:Tuple = i
SCREAMING_SNAKE_CASE:Union[str, Any] = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
SCREAMING_SNAKE_CASE:Any = array[temp_index - 1]
temp_index -= 1
SCREAMING_SNAKE_CASE:Union[str, Any] = temp_index_value
return array
def A_ ( snake_case , snake_case , snake_case ): # Max Heap
SCREAMING_SNAKE_CASE:Dict = index
SCREAMING_SNAKE_CASE:Optional[Any] = 2 * index + 1 # Left Node
SCREAMING_SNAKE_CASE:Union[str, Any] = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
SCREAMING_SNAKE_CASE:Any = left_index
if right_index < heap_size and array[largest] < array[right_index]:
SCREAMING_SNAKE_CASE:Dict = right_index
if largest != index:
SCREAMING_SNAKE_CASE:Tuple = array[largest], array[index]
heapify(__a , __a , __a )
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:List[str] = len(__a )
for i in range(n // 2 , -1 , -1 ):
heapify(__a , __a , __a )
for i in range(n - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE:Union[str, Any] = array[0], array[i]
heapify(__a , 0 , __a )
return array
def A_ ( snake_case , snake_case , snake_case , snake_case ):
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def A_ ( snake_case , snake_case , snake_case , snake_case ):
SCREAMING_SNAKE_CASE:str = low
SCREAMING_SNAKE_CASE:Dict = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
SCREAMING_SNAKE_CASE:int = array[j], array[i]
i += 1
def A_ ( snake_case ):
if len(__a ) == 0:
return array
SCREAMING_SNAKE_CASE:Tuple = 2 * math.ceil(math.loga(len(__a ) ) )
SCREAMING_SNAKE_CASE:Optional[Any] = 16
return intro_sort(__a , 0 , len(__a ) , __a , __a )
def A_ ( snake_case , snake_case , snake_case , snake_case , snake_case ):
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(__a )
max_depth -= 1
SCREAMING_SNAKE_CASE:Any = median_of_a(__a , __a , start + ((end - start) // 2) + 1 , end - 1 )
SCREAMING_SNAKE_CASE:int = partition(__a , __a , __a , __a )
intro_sort(__a , __a , __a , __a , __a )
SCREAMING_SNAKE_CASE:Union[str, Any] = p
return insertion_sort(__a , __a , __a )
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ = input("Enter numbers separated by a comma : ").strip()
A_ = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
| 139 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :Tuple = JsonDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[Any] = features.copy() if features else default_expected_features
UpperCamelCase__ :Tuple = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :int = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def a ( __a , __a , __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :int = tmp_path / '''cache'''
UpperCamelCase__ :str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
UpperCamelCase__ :Any = features.copy() if features else default_expected_features
UpperCamelCase__ :Union[str, Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Any = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def a ( __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
UpperCamelCase__ :int = features.copy()
UpperCamelCase__ :List[Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Optional[int] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[Any] = JsonDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_json_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
if issubclass(__a , __a ):
UpperCamelCase__ :Union[str, Any] = jsonl_path
elif issubclass(__a , __a ):
UpperCamelCase__ :int = [jsonl_path]
UpperCamelCase__ :Dict = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[str] = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
def a ( __a , __a , __a=("train",) ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__a , __a )
for split in splits:
UpperCamelCase__ :Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :str = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features
UpperCamelCase__ :str = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Dict = JsonDatasetReader({'''train''': jsonl_path} , features=__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> str:
'''simple docstring'''
if split:
UpperCamelCase__ :List[str] = {split: jsonl_path}
else:
UpperCamelCase__ :int = '''train'''
UpperCamelCase__ :int = {'''train''': jsonl_path, '''test''': jsonl_path}
UpperCamelCase__ :Any = tmp_path / '''cache'''
UpperCamelCase__ :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Any = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a ( __a ) -> Union[str, Any]:
'''simple docstring'''
return json.load(__a )
def a ( __a ) -> int:
'''simple docstring'''
return [json.loads(__a ) for line in buffer]
class lowercase :
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :int = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
with pytest.raises(UpperCamelCase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , compression=UpperCamelCase_ ).write()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :Dict = f.read()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :int = f.read()
assert exported_content == original_content | 97 | 0 |
def UpperCamelCase( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Any ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Optional[Any]=None ):
lowerCAmelCase_ : List[str] = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
lowerCAmelCase_ : Any = True, True
lowerCAmelCase_ : int = dfs(__a ,__a ,__a ,__a )
return path
def UpperCamelCase( __UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ):
lowerCAmelCase_ : Union[str, Any] = 0
lowerCAmelCase_ : str = -1
for i in range(__a ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
lowerCAmelCase_ : List[str] = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def UpperCamelCase( __UpperCamelCase : Dict ,__UpperCamelCase : int ):
lowerCAmelCase_ : Union[str, Any] = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
lowerCAmelCase_ : int = check_circuit_or_path(__a ,__a )
if check == 3:
print('''graph is not Eulerian''' )
print('''no path''' )
return
lowerCAmelCase_ : Dict = 1
if check == 2:
lowerCAmelCase_ : List[Any] = odd_node
print('''graph has a Euler path''' )
if check == 1:
print('''graph has a Euler cycle''' )
lowerCAmelCase_ : Dict = dfs(__a ,__a ,__a )
print(__a )
def UpperCamelCase( ):
lowerCAmelCase_ : Dict = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
lowerCAmelCase_ : Optional[int] = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
lowerCAmelCase_ : Optional[Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
lowerCAmelCase_ : List[str] = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
lowerCAmelCase_ : Optional[Any] = {
1: [],
2: []
# all degree is zero
}
lowerCAmelCase_ : Tuple = 10
check_euler(__a ,__a )
check_euler(__a ,__a )
check_euler(__a ,__a )
check_euler(__a ,__a )
check_euler(__a ,__a )
if __name__ == "__main__":
main()
| 103 |
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = ComputeEnvironment.AMAZON_SAGEMAKER
_a = True
_a = 'ml.p3.2xlarge'
_a = 'accelerate_sagemaker_execution_role'
_a = 'hf-sm'
_a = 'us-east-1'
_a = 1
_a = 'accelerate-sagemaker-1'
_a = '1.6'
_a = '4.4'
_a = 'train.py'
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
_a = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['''model_name_or_path'''] , UpperCamelCase_ )
assert isinstance(converted_args['''do_train'''] , UpperCamelCase_ )
assert isinstance(converted_args['''epochs'''] , UpperCamelCase_ )
assert isinstance(converted_args['''learning_rate'''] , UpperCamelCase_ )
assert isinstance(converted_args['''max_steps'''] , UpperCamelCase_ )
with pytest.raises(UpperCamelCase_ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args ) | 97 | 0 |
'''simple docstring'''
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
_snake_case = input('Enter image url: ').strip()
print(F'''Downloading image from {url} ...''')
_snake_case = BeautifulSoup(requests.get(url).content, 'html.parser')
# The image URL is in the content field of the first meta tag with property og:image
_snake_case = soup.find('meta', {'property': 'og:image'})['content']
_snake_case = requests.get(image_url).content
_snake_case = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(F'''Done. Image saved to disk as {file_name}.''')
| 250 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def a ( __a ) -> int:
'''simple docstring'''
for param in module.parameters():
UpperCamelCase__ :Dict = False
def a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCamelCase__ :Optional[int] = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def a ( __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Dict = plt.imshow(__a )
fig.axes.get_xaxis().set_visible(__a )
fig.axes.get_yaxis().set_visible(__a )
plt.show()
def a ( ) -> str:
'''simple docstring'''
UpperCamelCase__ :int = datetime.now()
UpperCamelCase__ :str = current_time.strftime('''%H:%M:%S''' )
return timestamp | 97 | 0 |
"""simple docstring"""
import os
def lowercase__ ( _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase : Union[str, Any] = len(grid[0] )
lowercase : List[Any] = len(__a )
lowercase : Union[str, Any] = 0
lowercase : str = 0
lowercase : Optional[Any] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(__a ):
for j in range(n_rows - 3 ):
lowercase : Union[str, Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
lowercase : Optional[int] = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
lowercase : Tuple = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
lowercase : str = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
lowercase : Optional[int] = max(
__a , __a , __a , __a )
if max_product > largest:
lowercase : List[Any] = max_product
return largest
def lowercase__ ( ) -> List[str]:
'''simple docstring'''
lowercase : int = []
with open(os.path.dirname(__a ) + '/grid.txt' ) as file:
for line in file:
grid.append(line.strip('\n' ).split(' ' ) )
lowercase : str = [[int(__a ) for i in grid[j]] for j in range(len(__a ) )]
return largest_product(__a )
if __name__ == "__main__":
print(solution())
| 255 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
__snake_case = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
__snake_case = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
__snake_case = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
if return_pvalue:
UpperCamelCase__ :Any = pearsonr(UpperCamelCase_ , UpperCamelCase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCamelCase_ , UpperCamelCase_ )[0] )} | 97 | 0 |
"""simple docstring"""
def lowercase ( _snake_case : int ) ->list:
"""simple docstring"""
if bit_count < 0:
raise ValueError('''The given input must be positive''' )
# get the generated string sequence
__snake_case : Optional[int] = gray_code_sequence_string(__a )
#
# convert them to integers
for i in range(len(__a ) ):
__snake_case : Tuple = int(sequence[i] , 2 )
return sequence
def lowercase ( _snake_case : Tuple ) ->list:
"""simple docstring"""
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__snake_case : Union[str, Any] = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
__snake_case : List[str] = gray_code_sequence_string(bit_count - 1 )
__snake_case : Dict = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__snake_case : Dict = '''0''' + smaller_sequence[i]
sequence.append(__a )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__snake_case : str = '''1''' + smaller_sequence[i]
sequence.append(__a )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__snake_case = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__snake_case = {
'''facebook/blenderbot_small-90M''': 512,
}
class lowercase ( A__ ):
"""simple docstring"""
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = BlenderbotSmallTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=UpperCamelCase_ , merges=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , ) , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Union[str, Any] = add_prefix_space
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = [self.sep_token_id]
UpperCamelCase__ :Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 97 | 0 |
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
A__ : List[Any] = True
except ImportError:
A__ : List[str] = False
A__ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
def _snake_case ( lowerCamelCase__ : Optional[Any] ) -> Union[str, Any]:
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class lowercase__ ( A__ ):
@staticmethod
def UpperCAmelCase__ ( snake_case__ : str ):
lowerCamelCase_ : Tuple =parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" , type=UpperCamelCase_ , help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" , type=UpperCamelCase_ , help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=UpperCamelCase_ )
def __init__( self : str , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any]=None , *snake_case__ : int ):
lowerCamelCase_ : int =testing
lowerCamelCase_ : Optional[int] =testing_file
lowerCamelCase_ : Dict =path
def UpperCAmelCase__ ( self : List[Any] ):
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
lowerCamelCase_ : Optional[int] =[directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(UpperCamelCase_ ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
lowerCamelCase_ : Optional[int] =(
Path(UpperCamelCase_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
lowerCamelCase_ : int =path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(UpperCamelCase_ ) )
else:
with open(self._testing_file , "r" ) as configuration_file:
lowerCamelCase_ : Union[str, Any] =json.load(UpperCamelCase_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=UpperCamelCase_ , extra_context=UpperCamelCase_ , )
lowerCamelCase_ : List[Any] =[directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" , "r" ) as configuration_file:
lowerCamelCase_ : Any =json.load(UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] =configuration['''lowercase_modelname''']
lowerCamelCase_ : Union[str, Any] =configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(F"""{directory}/configuration.json""" )
lowerCamelCase_ : str ='''PyTorch''' in generate_tensorflow_pytorch_and_flax
lowerCamelCase_ : str ='''TensorFlow''' in generate_tensorflow_pytorch_and_flax
lowerCamelCase_ : List[str] ='''Flax''' in generate_tensorflow_pytorch_and_flax
lowerCamelCase_ : List[Any] =F"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
os.makedirs(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=UpperCamelCase_ )
# Tests require submodules as they have parent imports
with open(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , "w" ):
pass
shutil.move(
F"""{directory}/__init__.py""" , F"""{model_dir}/__init__.py""" , )
shutil.move(
F"""{directory}/configuration_{lowercase_model_name}.py""" , F"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
def remove_copy_lines(snake_case__ : Dict ):
with open(UpperCamelCase_ , "r" ) as f:
lowerCamelCase_ : List[str] =f.readlines()
with open(UpperCamelCase_ , "w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(UpperCamelCase_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_tf_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_flax_{lowercase_model_name}.py""" , F"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/{lowercase_model_name}.md""" , F"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
F"""{directory}/tokenization_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
F"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , F"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
# Create temp file
lowerCamelCase_ : List[str] =mkstemp()
lowerCamelCase_ : Optional[int] =False
with fdopen(UpperCamelCase_ , "w" ) as new_file:
with open(UpperCamelCase_ ) as old_file:
for line in old_file:
new_file.write(UpperCamelCase_ )
if line_to_copy_below in line:
lowerCamelCase_ : int =True
for line_to_copy in lines_to_copy:
new_file.write(UpperCamelCase_ )
if not line_found:
raise ValueError(F"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(UpperCamelCase_ , UpperCamelCase_ )
# Remove original file
remove(UpperCamelCase_ )
# Move new file
move(UpperCamelCase_ , UpperCamelCase_ )
def skip_units(snake_case__ : Union[str, Any] ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(snake_case__ : Tuple ):
with open(UpperCamelCase_ ) as datafile:
lowerCamelCase_ : Union[str, Any] =[]
lowerCamelCase_ : int =False
lowerCamelCase_ : Dict =False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
lowerCamelCase_ : List[str] =line.split("\"" )[1]
lowerCamelCase_ : int =skip_units(UpperCamelCase_ )
elif "# Below: " in line and "##" not in line:
lowerCamelCase_ : str =line.split("\"" )[1]
lowerCamelCase_ : Tuple =skip_units(UpperCamelCase_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] =[]
elif "# Replace with" in line and "##" not in line:
lowerCamelCase_ : str =[]
elif "##" not in line:
lines_to_copy.append(UpperCamelCase_ )
remove(UpperCamelCase_ )
replace_in_files(F"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(UpperCamelCase_ )
| 144 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline | 97 | 0 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class A__ :
def __init__( self :int , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any]=sys.maxsize ) -> Optional[Any]:
'''simple docstring'''
_a : Union[str, Any] ='''bilinear'''
_a : Any =max_size
_a : str =short_edge_length
def __call__( self :Union[str, Any] , SCREAMING_SNAKE_CASE :List[Any] ) -> Any:
'''simple docstring'''
_a : Dict =[]
for img in imgs:
_a : Union[str, Any] =img.shape[:2]
# later: provide list and randomly choose index for resize
_a : int =np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_a : Optional[int] =size * 1.0 / min(UpperCamelCase_ , UpperCamelCase_ )
if h < w:
_a : Any =size, scale * w
else:
_a : Tuple =scale * h, size
if max(UpperCamelCase_ , UpperCamelCase_ ) > self.max_size:
_a : Union[str, Any] =self.max_size * 1.0 / max(UpperCamelCase_ , UpperCamelCase_ )
_a : Any =newh * scale
_a : str =neww * scale
_a : Union[str, Any] =int(neww + 0.5 )
_a : Any =int(newh + 0.5 )
if img.dtype == np.uinta:
_a : str =Image.fromarray(UpperCamelCase_ )
_a : Optional[Any] =pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_a : List[str] =np.asarray(UpperCamelCase_ )
else:
_a : Optional[int] =img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
_a : str =nn.functional.interpolate(
UpperCamelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase_ ).squeeze(0 )
img_augs.append(UpperCamelCase_ )
return img_augs
class A__ :
def __init__( self :int , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_a : str =ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_a : Optional[int] =cfg.INPUT.FORMAT
_a : Optional[Any] =cfg.SIZE_DIVISIBILITY
_a : Tuple =cfg.PAD_VALUE
_a : Union[str, Any] =cfg.INPUT.MAX_SIZE_TEST
_a : List[str] =cfg.MODEL.DEVICE
_a : str =torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_a : List[str] =torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_a : List[Any] =lambda SCREAMING_SNAKE_CASE : (x - self.pixel_mean) / self.pixel_std
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_a : str =tuple(max(UpperCamelCase_ ) for s in zip(*[img.shape for img in images] ) )
_a : Optional[Any] =[im.shape[-2:] for im in images]
_a : Dict =[
nn.functional.pad(
UpperCamelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(UpperCamelCase_ , UpperCamelCase_ )
]
return torch.stack(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ )
def __call__( self :Optional[Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Any=False ) -> List[str]:
'''simple docstring'''
with torch.no_grad():
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_a : Union[str, Any] =[images]
if single_image:
assert len(UpperCamelCase_ ) == 1
for i in range(len(UpperCamelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(UpperCamelCase_ , images.pop(UpperCamelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
UpperCamelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_a : Tuple =torch.tensor([im.shape[:2] for im in images] )
_a : Optional[Any] =self.aug(UpperCamelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_a : Tuple =[self.normalizer(UpperCamelCase_ ) for x in images]
# now pad them to do the following operations
_a : Dict =self.pad(UpperCamelCase_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
_a : Union[str, Any] =torch.true_divide(UpperCamelCase_ , UpperCamelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : str ) -> List[Any]:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[str] ,_UpperCAmelCase : Optional[int] ) -> str:
assert torch.isfinite(__a ).all(), "Box tensor contains infinite or NaN!"
_a : Tuple =box_size
tensor[:, 0].clamp_(min=0 ,max=__a )
tensor[:, 1].clamp_(min=0 ,max=__a )
tensor[:, 2].clamp_(min=0 ,max=__a )
tensor[:, 3].clamp_(min=0 ,max=__a )
| 276 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(
features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCamelCase__ :Any = Generator(
cache_dir=UpperCamelCase_ , features=UpperCamelCase_ , generator=UpperCamelCase_ , gen_kwargs=UpperCamelCase_ , **UpperCamelCase_ , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.streaming:
UpperCamelCase__ :Optional[Any] = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
UpperCamelCase__ :Optional[int] = None
UpperCamelCase__ :int = None
UpperCamelCase__ :Any = None
UpperCamelCase__ :Any = None
self.builder.download_and_prepare(
download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , )
UpperCamelCase__ :List[Any] = self.builder.as_dataset(
split='''train''' , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory )
return dataset | 97 | 0 |
from math import factorial
class lowerCamelCase__:
def __init__( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = real
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = [1] * rank
else:
__lowerCamelCase = rank
def __repr__( self: List[Any] ):
return (
F'{self.real}+'
F'{"+".join(str(UpperCamelCase_ )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'
)
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , UpperCamelCase_ )
def __add__( self: str , UpperCamelCase_: Optional[Any] ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return Dual(self.real + other , self.duals )
__lowerCamelCase = self.duals.copy()
__lowerCamelCase = other.duals.copy()
if len(UpperCamelCase_ ) > len(UpperCamelCase_ ):
o_dual.extend([1] * (len(UpperCamelCase_ ) - len(UpperCamelCase_ )) )
elif len(UpperCamelCase_ ) < len(UpperCamelCase_ ):
s_dual.extend([1] * (len(UpperCamelCase_ ) - len(UpperCamelCase_ )) )
__lowerCamelCase = []
for i in range(len(UpperCamelCase_ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , UpperCamelCase_ )
UpperCAmelCase__ : Dict = __add__
def __sub__( self: Optional[int] , UpperCamelCase_: Optional[int] ):
return self + other * -1
def __mul__( self: List[str] , UpperCamelCase_: List[str] ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , UpperCamelCase_ )
__lowerCamelCase = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = __mul__
def __truediv__( self: Optional[Any] , UpperCamelCase_: Any ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , UpperCamelCase_ )
raise ValueError
def __floordiv__( self: str , UpperCamelCase_: str ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , UpperCamelCase_ )
raise ValueError
def __pow__( self: Dict , UpperCamelCase_: str ):
if n < 0 or isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
__lowerCamelCase = self
for _ in range(n - 1 ):
x *= self
return x
def lowerCamelCase__ ( A__ : Tuple , A__ : List[str] , A__ : Tuple ):
'''simple docstring'''
if not callable(__a ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(__a , (float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(__a , __a ):
raise ValueError("""differentiate() requires an int as input for order""" )
__lowerCamelCase = Dual(__a , 1 )
__lowerCamelCase = func(__a )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(__a )
if __name__ == "__main__":
import doctest
doctest.testmod()
def lowerCamelCase__ ( A__ : Any ):
'''simple docstring'''
return y**2 * y**4
print(differentiate(f, 9, 2))
| 12 |
'''simple docstring'''
__snake_case = 65521
def a ( __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = 1
UpperCamelCase__ :Any = 0
for plain_chr in plain_text:
UpperCamelCase__ :List[str] = (a + ord(__a )) % MOD_ADLER
UpperCamelCase__ :Tuple = (b + a) % MOD_ADLER
return (b << 16) | a | 97 | 0 |
def UpperCAmelCase__ ( _A : Optional[int] , _A : Optional[Any] ):
'''simple docstring'''
if digit_amount > 0:
return round(number - int(__a ) , __a )
return number - int(__a )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 188 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'camembert'
def __init__( self , UpperCamelCase_=30522 , UpperCamelCase_=768 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3072 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=2 , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Optional[int] = hidden_size
UpperCamelCase__ :Optional[int] = num_hidden_layers
UpperCamelCase__ :List[Any] = num_attention_heads
UpperCamelCase__ :Union[str, Any] = hidden_act
UpperCamelCase__ :List[Any] = intermediate_size
UpperCamelCase__ :int = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[str] = layer_norm_eps
UpperCamelCase__ :int = position_embedding_type
UpperCamelCase__ :Any = use_cache
UpperCamelCase__ :Any = classifier_dropout
class lowercase ( A__ ):
"""simple docstring"""
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__ :List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ :Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] ) | 97 | 0 |
import os
from datetime import datetime as dt
from github import Github
_lowerCamelCase : Dict = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def a_ ( ) -> List[Any]:
_snake_case = Github(os.environ['GITHUB_TOKEN'] )
_snake_case = g.get_repo('huggingface/diffusers' )
_snake_case = repo.get_issues(state='open' )
for issue in open_issues:
_snake_case = sorted(issue.get_comments() , key=lambda __lowercase : i.created_at , reverse=__a )
_snake_case = comments[0] if len(__a ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main() | 282 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=30 , UpperCamelCase_=400 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=True , UpperCamelCase_=1 / 255 , UpperCamelCase_=True , ):
'''simple docstring'''
UpperCamelCase__ :Dict = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
UpperCamelCase__ :str = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :str = min_resolution
UpperCamelCase__ :Optional[Any] = max_resolution
UpperCamelCase__ :int = do_resize
UpperCamelCase__ :Optional[Any] = size
UpperCamelCase__ :Tuple = do_normalize
UpperCamelCase__ :List[Any] = image_mean
UpperCamelCase__ :Dict = image_std
UpperCamelCase__ :Union[str, Any] = do_rescale
UpperCamelCase__ :Union[str, Any] = rescale_factor
UpperCamelCase__ :Union[str, Any] = do_pad
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
if not batched:
UpperCamelCase__ :List[str] = image_inputs[0]
if isinstance(UpperCamelCase_ , Image.Image ):
UpperCamelCase__ , UpperCamelCase__ :List[str] = image.size
else:
UpperCamelCase__ , UpperCamelCase__ :List[Any] = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase__ :int = int(self.size['''shortest_edge'''] * h / w )
UpperCamelCase__ :Dict = self.size['''shortest_edge''']
elif w > h:
UpperCamelCase__ :int = self.size['''shortest_edge''']
UpperCamelCase__ :Tuple = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCamelCase__ :str = self.size['''shortest_edge''']
UpperCamelCase__ :str = self.size['''shortest_edge''']
else:
UpperCamelCase__ :Any = []
for image in image_inputs:
UpperCamelCase__ , UpperCamelCase__ :Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase__ :List[Any] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[0] )[0]
UpperCamelCase__ :Optional[int] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = ConditionalDetrImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase__ :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ :str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Optional[int] = json.loads(f.read() )
UpperCamelCase__ :Any = {'''image_id''': 39769, '''annotations''': target}
# encode them
UpperCamelCase__ :str = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
UpperCamelCase__ :List[Any] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify orig_size
UpperCamelCase__ :Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Tuple = json.loads(f.read() )
UpperCamelCase__ :List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
UpperCamelCase__ :Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
UpperCamelCase__ :Dict = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Any = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify masks
UpperCamelCase__ :Optional[Any] = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ )
# verify orig_size
UpperCamelCase__ :List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) ) | 97 | 0 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __a :
def __lowercase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ : int = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
UpperCamelCase__ : int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
UpperCamelCase__ : int = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCamelCase__ : int = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=UpperCamelCase_ , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
UpperCamelCase__ : Optional[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __lowercase ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ : int = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
UpperCamelCase__ : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
UpperCamelCase__ : List[str] = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCamelCase__ : Optional[int] = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=UpperCamelCase_ , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
UpperCamelCase__ : Tuple = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , )
torch.manual_seed(0 )
UpperCamelCase__ : str = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = self.get_dummy_components()
UpperCamelCase__ : List[Any] = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ : int = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ : List[str] = inputs['''prompt''']
UpperCamelCase__ : Tuple = inputs['''generator''']
UpperCamelCase__ : Optional[Any] = inputs['''num_inference_steps''']
UpperCamelCase__ : List[str] = inputs['''output_type''']
if "image" in inputs:
UpperCamelCase__ : Optional[int] = inputs['''image''']
else:
UpperCamelCase__ : Optional[Any] = None
if "mask_image" in inputs:
UpperCamelCase__ : List[Any] = inputs['''mask_image''']
else:
UpperCamelCase__ : Union[str, Any] = None
if "original_image" in inputs:
UpperCamelCase__ : Optional[Any] = inputs['''original_image''']
else:
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__ : Tuple = pipe.encode_prompt(UpperCamelCase_ )
# inputs with prompt converted to embeddings
UpperCamelCase__ : List[str] = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCamelCase__ : List[str] = image
if mask_image is not None:
UpperCamelCase__ : List[Any] = mask_image
if original_image is not None:
UpperCamelCase__ : Optional[Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase__ : List[Any] = pipe(**UpperCamelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase_ )
UpperCamelCase__ : Dict = self.pipeline_class.from_pretrained(UpperCamelCase_ )
pipe_loaded.to(UpperCamelCase_ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase_ , UpperCamelCase_ ) is None , F'`{optional_component}` did not stay set to None after loading.' , )
UpperCamelCase__ : List[Any] = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ : str = inputs['''generator''']
UpperCamelCase__ : Optional[int] = inputs['''num_inference_steps''']
UpperCamelCase__ : Any = inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCamelCase__ : List[str] = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCamelCase__ : Dict = image
if mask_image is not None:
UpperCamelCase__ : int = mask_image
if original_image is not None:
UpperCamelCase__ : Optional[int] = original_image
UpperCamelCase__ : str = pipe_loaded(**UpperCamelCase_ )[0]
UpperCamelCase__ : Optional[Any] = np.abs(to_np(UpperCamelCase_ ) - to_np(UpperCamelCase_ ) ).max()
self.assertLess(UpperCamelCase_ , 1e-4 )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.get_dummy_components()
UpperCamelCase__ : List[str] = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ : Optional[int] = pipe(**UpperCamelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase_ )
UpperCamelCase__ : Tuple = self.pipeline_class.from_pretrained(UpperCamelCase_ )
pipe_loaded.to(UpperCamelCase_ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCamelCase__ : Tuple = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ : int = pipe_loaded(**UpperCamelCase_ )[0]
UpperCamelCase__ : Tuple = np.abs(to_np(UpperCamelCase_ ) - to_np(UpperCamelCase_ ) ).max()
self.assertLess(UpperCamelCase_ , 1e-4 ) | 189 |
'''simple docstring'''
from collections import defaultdict
class lowercase :
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
UpperCamelCase__ :Union[str, Any] = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(UpperCamelCase_ ) )
]
UpperCamelCase__ :str = defaultdict(UpperCamelCase_ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
UpperCamelCase__ :Optional[int] = (1 << len(UpperCamelCase_ )) - 1
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't this task in the arrangement
UpperCamelCase__ :str = self.count_ways_until(UpperCamelCase_ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
UpperCamelCase__ :Optional[int] = total_ways_util
return self.dp[mask][task_no]
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
for i in range(len(UpperCamelCase_ ) ):
for j in task_performed[i]:
self.task[j].append(UpperCamelCase_ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
__snake_case = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__snake_case = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
) | 97 | 0 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
A_ = logging.get_logger("transformers.models.speecht5")
def A_ ( snake_case , snake_case , snake_case ):
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE:List[str] = checkpoint['''input_conv.weight_g''']
SCREAMING_SNAKE_CASE:Any = checkpoint['''input_conv.weight_v''']
SCREAMING_SNAKE_CASE:Dict = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE:Optional[Any] = checkpoint[F'''upsamples.{i}.1.weight_g''']
SCREAMING_SNAKE_CASE:List[str] = checkpoint[F'''upsamples.{i}.1.weight_v''']
SCREAMING_SNAKE_CASE:Dict = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE:int = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE:Dict = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE:Any = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
SCREAMING_SNAKE_CASE:Union[str, Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE:Optional[Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE:str = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
SCREAMING_SNAKE_CASE:Tuple = checkpoint['''output_conv.1.weight_g''']
SCREAMING_SNAKE_CASE:Dict = checkpoint['''output_conv.1.weight_v''']
SCREAMING_SNAKE_CASE:List[str] = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
@torch.no_grad()
def A_ ( snake_case , snake_case , snake_case , snake_case=None , snake_case=None , ):
if config_path is not None:
SCREAMING_SNAKE_CASE:int = SpeechTaHifiGanConfig.from_pretrained(__a )
else:
SCREAMING_SNAKE_CASE:int = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE:Any = SpeechTaHifiGan(__a )
SCREAMING_SNAKE_CASE:Tuple = torch.load(__a )
load_weights(orig_checkpoint["model"]["generator"] , __a , __a )
SCREAMING_SNAKE_CASE:Optional[int] = np.load(__a )
SCREAMING_SNAKE_CASE:int = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE:Optional[int] = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE:str = torch.from_numpy(__a ).float()
SCREAMING_SNAKE_CASE:List[Any] = torch.from_numpy(__a ).float()
model.save_pretrained(__a )
if repo_id:
print("Pushing to the hub..." )
model.push_to_hub(__a )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
A_ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 139 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
__snake_case = ''''''
def a ( __a ) -> None:
'''simple docstring'''
UpperCamelCase__ :List[Any] = tweepy.OAuthHandler(__a , __a )
auth.set_access_token(__a , __a )
UpperCamelCase__ :List[str] = tweepy.API(__a )
# initialize a list to hold all the tweepy Tweets
UpperCamelCase__ :Dict = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCamelCase__ :Tuple = api.user_timeline(screen_name=__a , count=200 )
# save most recent tweets
alltweets.extend(__a )
# save the id of the oldest tweet less one
UpperCamelCase__ :Union[str, Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(__a ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
UpperCamelCase__ :Union[str, Any] = api.user_timeline(
screen_name=__a , count=200 , max_id=__a )
# save most recent tweets
alltweets.extend(__a )
# update the id of the oldest tweet less one
UpperCamelCase__ :Tuple = alltweets[-1].id - 1
print(f'''...{len(__a )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCamelCase__ :int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' , '''w''' ) as f:
UpperCamelCase__ :Tuple = csv.writer(__a )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(__a )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''') | 97 | 0 |
"""simple docstring"""
import math
from datetime import datetime, timedelta
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = year % 1_9
UpperCAmelCase__ = year % 4
UpperCAmelCase__ = year % 7
UpperCAmelCase__ = math.floor(year / 1_0_0 )
UpperCAmelCase__ = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
UpperCAmelCase__ = leap_day_inhibits / 4
UpperCAmelCase__ = (
1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 3_0
UpperCAmelCase__ = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
UpperCAmelCase__ = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
# PHM -> Paschal Full Moon
UpperCAmelCase__ = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
return datetime(lowerCamelCase , 4 , 1_9 )
elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
return datetime(lowerCamelCase , 4 , 1_8 )
else:
return datetime(lowerCamelCase , 3 , 2_2 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
lowerCAmelCase__ : Any = 'will be' if year > datetime.now().year else 'was'
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 98 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ : str = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[str] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowerCAmelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
import math
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = [True] * n
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
UpperCAmelCase__ = i * 2
while index < n:
UpperCAmelCase__ = False
UpperCAmelCase__ = index + i
UpperCAmelCase__ = [2]
for i in range(3 , lowerCamelCase , 2 ):
if is_prime[i]:
primes.append(lowerCamelCase )
return primes
def a_ ( lowerCamelCase = 9_9_9_9_6_6_6_6_3_3_3_3 ):
UpperCAmelCase__ = math.floor(math.sqrt(lowerCamelCase ) ) + 1_0_0
UpperCAmelCase__ = prime_sieve(lowerCamelCase )
UpperCAmelCase__ = 0
UpperCAmelCase__ = 0
UpperCAmelCase__ = primes[prime_index]
while (last_prime**2) <= limit:
UpperCAmelCase__ = primes[prime_index + 1]
UpperCAmelCase__ = last_prime**2
UpperCAmelCase__ = next_prime**2
# Get numbers divisible by lps(current)
UpperCAmelCase__ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
UpperCAmelCase__ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
UpperCAmelCase__ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
UpperCAmelCase__ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 98 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ : Tuple = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[Any] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : int = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCAmelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class snake_case :
"""simple docstring"""
snake_case__ = 42
snake_case__ = None
snake_case__ = None
def a_ ( ):
UpperCAmelCase__ = Node(1 )
UpperCAmelCase__ = Node(2 )
UpperCAmelCase__ = Node(3 )
UpperCAmelCase__ = Node(4 )
UpperCAmelCase__ = Node(5 )
return tree
def a_ ( lowerCamelCase ):
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def a_ ( lowerCamelCase ):
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def a_ ( lowerCamelCase ):
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def a_ ( lowerCamelCase ):
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = []
if root is None:
return output
UpperCAmelCase__ = deque([root] )
while process_queue:
UpperCAmelCase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = []
def populate_output(lowerCamelCase , lowerCamelCase ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCamelCase , lowerCamelCase )
return output
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = []
def populate_output(lowerCamelCase , lowerCamelCase ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCamelCase , lowerCamelCase )
return output
def a_ ( lowerCamelCase ):
if root is None:
return []
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
UpperCAmelCase__ = height(lowerCamelCase )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = 1
else:
output.append(get_nodes_from_right_to_left(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = 0
return output
def a_ ( ): # Main function for testing.
UpperCAmelCase__ = make_tree()
print(f'''In-order Traversal: {inorder(lowerCamelCase )}''' )
print(f'''Pre-order Traversal: {preorder(lowerCamelCase )}''' )
print(f'''Post-order Traversal: {postorder(lowerCamelCase )}''' , '\n' )
print(f'''Height of Tree: {height(lowerCamelCase )}''' , '\n' )
print('Complete Level Order Traversal: ' )
print(level_order(lowerCamelCase ) , '\n' )
print('Level-wise order Traversal: ' )
for level in range(1 , height(lowerCamelCase ) + 1 ):
print(f'''Level {level}:''' , get_nodes_from_left_to_right(lowerCamelCase , level=lowerCamelCase ) )
print('\nZigZag order Traversal: ' )
print(zigzag(lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 98 | """simple docstring"""
import os
import numpy
import onnx
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = a.name
UpperCAmelCase__ = b.name
UpperCAmelCase__ = ''
UpperCAmelCase__ = ''
UpperCAmelCase__ = a == b
UpperCAmelCase__ = name_a
UpperCAmelCase__ = name_b
return res
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(lowerCamelCase , lowerCamelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase , lowerCamelCase )
_graph_replace_input_with(node_proto.attribute[1].g , lowerCamelCase , lowerCamelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for n in graph_proto.node:
_node_replace_input_with(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = list(model.graph.initializer )
UpperCAmelCase__ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
UpperCAmelCase__ = inits[i].name
UpperCAmelCase__ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = os.path.dirname(lowerCamelCase )
UpperCAmelCase__ = os.path.basename(lowerCamelCase )
UpperCAmelCase__ = onnx.load(os.path.join(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = list(model.graph.initializer )
UpperCAmelCase__ = set()
UpperCAmelCase__ = {}
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
for i in range(len(lowerCamelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(lowerCamelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(lowerCamelCase )
dup_set.add(lowerCamelCase )
UpperCAmelCase__ = inits[j].data_type
UpperCAmelCase__ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 1_1:
mem_size *= 8
else:
print('unexpected data type: ' , lowerCamelCase )
total_reduced_size += mem_size
UpperCAmelCase__ = inits[i].name
UpperCAmelCase__ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowerCamelCase )
else:
UpperCAmelCase__ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' )
UpperCAmelCase__ = sorted(lowerCamelCase )
_remove_dup_initializers_from_model(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = 'optimized_' + model_file_name
UpperCAmelCase__ = os.path.join(lowerCamelCase , lowerCamelCase )
onnx.save(lowerCamelCase , lowerCamelCase )
return new_model
| 98 | 1 |
"""simple docstring"""
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase ) )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
# Base Case
if index == len(lowerCamelCase ):
return True
# Recursive Step
for i in range(lowerCamelCase ):
if valid_coloring(graph[index] , lowerCamelCase , lowerCamelCase ):
# Color current vertex
UpperCAmelCase__ = i
# Validate coloring
if util_color(lowerCamelCase , lowerCamelCase , lowerCamelCase , index + 1 ):
return True
# Backtrack
UpperCAmelCase__ = -1
return False
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = [-1] * len(lowerCamelCase )
if util_color(lowerCamelCase , lowerCamelCase , lowerCamelCase , 0 ):
return colored_vertices
return []
| 98 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = ShapEImgaImgPipeline
snake_case__ = ["image"]
snake_case__ = ["image"]
snake_case__ = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
snake_case__ = False
@property
def __lowerCAmelCase ( self : List[str] ):
return 32
@property
def __lowerCAmelCase ( self : str ):
return 32
@property
def __lowerCAmelCase ( self : int ):
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self : List[Any] ):
return 8
@property
def __lowerCAmelCase ( self : Optional[int] ):
torch.manual_seed(0 )
UpperCAmelCase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,)
UpperCAmelCase__ = CLIPVisionModel(lowerCamelCase__ )
return model
@property
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = CLIPImageProcessor(
crop_size=224 ,do_center_crop=lowerCamelCase__ ,do_normalize=lowerCamelCase__ ,do_resize=lowerCamelCase__ ,image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] ,image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] ,resample=3 ,size=224 ,)
return image_processor
@property
def __lowerCAmelCase ( self : str ):
torch.manual_seed(0 )
UpperCAmelCase__ = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
UpperCAmelCase__ = PriorTransformer(**lowerCamelCase__ )
return model
@property
def __lowerCAmelCase ( self : Tuple ):
torch.manual_seed(0 )
UpperCAmelCase__ = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
UpperCAmelCase__ = ShapERenderer(**lowerCamelCase__ )
return model
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = self.dummy_prior
UpperCAmelCase__ = self.dummy_image_encoder
UpperCAmelCase__ = self.dummy_image_processor
UpperCAmelCase__ = self.dummy_renderer
UpperCAmelCase__ = HeunDiscreteScheduler(
beta_schedule='exp' ,num_train_timesteps=1_024 ,prediction_type='sample' ,use_karras_sigmas=lowerCamelCase__ ,clip_sample=lowerCamelCase__ ,clip_sample_range=1.0 ,)
UpperCAmelCase__ = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : Any ,lowerCamelCase__ : str=0 ):
UpperCAmelCase__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith('mps' ):
UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ )
else:
UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
UpperCAmelCase__ = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = 'cpu'
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
UpperCAmelCase__ = output.images[0]
UpperCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCAmelCase__ = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Tuple ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = torch_device == 'cpu'
UpperCAmelCase__ = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=lowerCamelCase__ ,relax_max_difference=lowerCamelCase__ ,)
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = self.get_dummy_inputs(lowerCamelCase__ )
for key in inputs.keys():
if key in self.batch_params:
UpperCAmelCase__ = batch_size * [inputs[key]]
UpperCAmelCase__ = pipe(**lowerCamelCase__ ,num_images_per_prompt=lowerCamelCase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
UpperCAmelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
UpperCAmelCase__ = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
UpperCAmelCase__ = pipe(
lowerCamelCase__ ,generator=lowerCamelCase__ ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type='np' ,).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCamelCase__ ,lowerCamelCase__ )
| 98 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
lowerCAmelCase__ : Optional[Any] = {'tokenization_herbert': ['HerbertTokenizer']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Optional[int] = ['HerbertTokenizerFast']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
lowerCAmelCase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | """simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] ,lowerCamelCase__ : int="</s>" ,lowerCamelCase__ : str="<unk>" ,lowerCamelCase__ : Union[str, Any]="<pad>" ,lowerCamelCase__ : int=125 ,lowerCamelCase__ : str=None ,**lowerCamelCase__ : Union[str, Any] ,):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase__ = [f'''<extra_id_{i}>''' for i in range(lowerCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCAmelCase__ = len(set(filter(lambda lowerCamelCase__ : bool('extra_id' in str(lowerCamelCase__ ) ) ,lowerCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
' extra_ids tokens' )
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else pad_token
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else eos_token
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else unk_token
super().__init__(
eos_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,extra_ids=lowerCamelCase__ ,additional_special_tokens=lowerCamelCase__ ,**lowerCamelCase__ ,)
UpperCAmelCase__ = extra_ids
UpperCAmelCase__ = 2**8 # utf is 8 bits
# define special tokens dict
UpperCAmelCase__ = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
UpperCAmelCase__ = len(self.special_tokens_encoder )
UpperCAmelCase__ = len(lowerCamelCase__ )
for i, token in enumerate(lowerCamelCase__ ):
UpperCAmelCase__ = self.vocab_size + i - n
UpperCAmelCase__ = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ,lowerCamelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ ,token_ids_a=lowerCamelCase__ ,already_has_special_tokens=lowerCamelCase__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowerCamelCase__ )) + [1]
return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[int] ):
if len(lowerCamelCase__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
UpperCAmelCase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
UpperCAmelCase__ = self._add_eos_if_not_present(lowerCamelCase__ )
if token_ids_a is None:
return token_ids_a
else:
UpperCAmelCase__ = self._add_eos_if_not_present(lowerCamelCase__ )
return token_ids_a + token_ids_a
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : str ):
UpperCAmelCase__ = [chr(lowerCamelCase__ ) for i in text.encode('utf-8' )]
return tokens
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : str ):
if token in self.special_tokens_encoder:
UpperCAmelCase__ = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
UpperCAmelCase__ = self.added_tokens_encoder[token]
elif len(lowerCamelCase__ ) != 1:
UpperCAmelCase__ = self.unk_token_id
else:
UpperCAmelCase__ = ord(lowerCamelCase__ ) + self._num_special_tokens
return token_id
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Any ):
if index in self.special_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[index]
else:
UpperCAmelCase__ = chr(index - self._num_special_tokens )
return token
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
UpperCAmelCase__ = b''
for token in tokens:
if token in self.special_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.added_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.special_tokens_encoder:
UpperCAmelCase__ = token.encode('utf-8' )
elif token in self.added_tokens_encoder:
UpperCAmelCase__ = token.encode('utf-8' )
else:
UpperCAmelCase__ = bytes([ord(lowerCamelCase__ )] )
bstring += tok_string
UpperCAmelCase__ = bstring.decode('utf-8' ,errors='ignore' )
return string
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ):
return ()
| 98 | 1 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def a_ ( ):
UpperCAmelCase__ = 1_0
UpperCAmelCase__ = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
UpperCAmelCase__ = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [9_7], 'text': ['1976']}] * 1_0,
'id': list(range(lowerCamelCase ) ),
} , features=lowerCamelCase , )
return dataset
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowerCamelCase )
return filename
# FILE_CONTENT + files
lowerCAmelCase__ : Tuple = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt'
UpperCAmelCase__ = FILE_CONTENT
with open(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase )
return filename
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
import bza
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
UpperCAmelCase__ = bytes(lowerCamelCase , 'utf-8' )
with bza.open(lowerCamelCase , 'wb' ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
import gzip
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
UpperCAmelCase__ = bytes(lowerCamelCase , 'utf-8' )
with gzip.open(lowerCamelCase , 'wb' ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
if datasets.config.LZ4_AVAILABLE:
import lza.frame
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
UpperCAmelCase__ = bytes(lowerCamelCase , 'utf-8' )
with lza.frame.open(lowerCamelCase , 'wb' ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase ):
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(lowerCamelCase , 'w' ) as archive:
archive.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase ):
import tarfile
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowerCamelCase , 'w' ) as f:
f.add(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
import lzma
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
UpperCAmelCase__ = bytes(lowerCamelCase , 'utf-8' )
with lzma.open(lowerCamelCase , 'wb' ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase ):
import zipfile
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
UpperCAmelCase__ = bytes(lowerCamelCase , 'utf-8' )
with zstd.open(lowerCamelCase , 'wb' ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'file.xml'
UpperCAmelCase__ = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase )
return filename
lowerCAmelCase__ : Optional[Any] = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
lowerCAmelCase__ : str = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
lowerCAmelCase__ : Any = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
lowerCAmelCase__ : Any = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
lowerCAmelCase__ : Union[str, Any] = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def a_ ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = datasets.Dataset.from_dict(lowerCamelCase )
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(lowerCamelCase ) ) as con:
UpperCAmelCase__ = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowerCamelCase , 'w' , newline='' ) as f:
UpperCAmelCase__ = csv.DictWriter(lowerCamelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowerCamelCase , 'w' , newline='' ) as f:
UpperCAmelCase__ = csv.DictWriter(lowerCamelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase ):
import bza
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowerCamelCase , 'rb' ) as f:
UpperCAmelCase__ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowerCamelCase , 'wb' ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowerCamelCase , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase ) ) )
f.write(lowerCamelCase , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
UpperCAmelCase__ = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(lowerCamelCase , 'wb' ) as f:
UpperCAmelCase__ = pq.ParquetWriter(lowerCamelCase , schema=lowerCamelCase )
UpperCAmelCase__ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase ) )] for k in DATA[0]} , schema=lowerCamelCase )
writer.write_table(lowerCamelCase )
writer.close()
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase__ = {'data': DATA}
with open(lowerCamelCase , 'w' ) as f:
json.dump(lowerCamelCase , lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase__ = {'data': DATA_DICT_OF_LISTS}
with open(lowerCamelCase , 'w' ) as f:
json.dump(lowerCamelCase , lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowerCamelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowerCamelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowerCamelCase , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCamelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowerCamelCase , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCamelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase ):
import gzip
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowerCamelCase , 'rb' ) as orig_file:
with gzip.open(lowerCamelCase , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase ):
import gzip
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowerCamelCase , 'rb' ) as orig_file:
with gzip.open(lowerCamelCase , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase ) ) )
f.write(lowerCamelCase , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(lowerCamelCase , 'w' ) as f:
f.add(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
f.add(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(lowerCamelCase , 'w' ) as f:
f.add(lowerCamelCase , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = ['0', '1', '2', '3']
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(lowerCamelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = ['0', '1', '2', '3']
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(lowerCamelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = ['0', '1', '2', '3']
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(lowerCamelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase ) ) )
f.write(lowerCamelCase , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase , arcname=os.path.basename('unsupported.ext' ) )
f.write(lowerCamelCase , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( ):
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def a_ ( ):
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
return data_dir
| 98 | """simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class snake_case :
"""simple docstring"""
snake_case__ = 42
snake_case__ = None
snake_case__ = None
lowerCAmelCase__ : Union[str, Any] = namedtuple('CoinsDistribResult', 'moves excess')
def a_ ( lowerCamelCase ):
if root is None:
return 0
# Validation
def count_nodes(lowerCamelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(lowerCamelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(lowerCamelCase ) != count_coins(lowerCamelCase ):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
def get_distrib(lowerCamelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
UpperCAmelCase__ , UpperCAmelCase__ = get_distrib(node.left )
UpperCAmelCase__ , UpperCAmelCase__ = get_distrib(node.right )
UpperCAmelCase__ = 1 - left_distrib_excess
UpperCAmelCase__ = 1 - right_distrib_excess
UpperCAmelCase__ = (
left_distrib_moves
+ right_distrib_moves
+ abs(lowerCamelCase )
+ abs(lowerCamelCase )
)
UpperCAmelCase__ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(lowerCamelCase , lowerCamelCase )
return get_distrib(lowerCamelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase__ : str = get_tests_dir('fixtures')
lowerCAmelCase__ : List[str] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowerCAmelCase__ : List[Any] = get_tests_dir('fixtures/dummy-config.json')
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = 0
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict()
config_dict.pop('feature_extractor_type' )
UpperCAmelCase__ = WavaVecaFeatureExtractor(**lowerCamelCase__ )
# save in new folder
model_config.save_pretrained(lowerCamelCase__ )
config.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
# make sure private variable is not incorrectly saved
UpperCAmelCase__ = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ):
with self.assertRaisesRegex(
lowerCamelCase__ ,'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained('bert-base' )
def __lowerCAmelCase ( self : List[Any] ):
with self.assertRaisesRegex(
lowerCamelCase__ ,R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ,revision='aaaaaa' )
def __lowerCAmelCase ( self : List[str] ):
with self.assertRaisesRegex(
lowerCamelCase__ ,'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' ,):
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def __lowerCAmelCase ( self : int ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=lowerCamelCase__ )
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
def __lowerCAmelCase ( self : str ):
try:
AutoConfig.register('custom' ,lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ ,lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoFeatureExtractor.register(lowerCamelCase__ ,lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase__ = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self : Optional[int] ):
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = True
try:
AutoConfig.register('custom' ,lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ ,lowerCamelCase__ )
# If remote code is not set, the default is to use local
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowerCamelCase__ ,'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 98 | """simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = (PNDMScheduler,)
snake_case__ = (("num_inference_steps", 50),)
def __lowerCAmelCase ( self : List[str] ,**lowerCamelCase__ : str ):
UpperCAmelCase__ = {
'num_train_timesteps': 1_000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
}
config.update(**lowerCamelCase__ )
return config
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Optional[Any]=0 ,**lowerCamelCase__ : List[str] ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : Tuple ):
pass
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str]=0 ,**lowerCamelCase__ : Tuple ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : List[Any] ,**lowerCamelCase__ : int ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = 10
UpperCAmelCase__ = self.dummy_model()
UpperCAmelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
return sample
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase__ ,'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase__ ,'set_timesteps' ):
UpperCAmelCase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCAmelCase ( self : List[Any] ):
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[int] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCamelCase__ )
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps ,torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,)
def __lowerCAmelCase ( self : Dict ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowerCamelCase__ ,beta_end=lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase__ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
def __lowerCAmelCase ( self : int ):
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.full_loop()
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.full_loop(prediction_type='v_prediction' )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def __lowerCAmelCase ( self : Union[str, Any] ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
| 98 | 1 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase__ : Optional[int] = {
'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "data2vec-audio"
def __init__( self : int ,lowerCamelCase__ : List[str]=32 ,lowerCamelCase__ : int=768 ,lowerCamelCase__ : Optional[Any]=12 ,lowerCamelCase__ : int=12 ,lowerCamelCase__ : int=3_072 ,lowerCamelCase__ : int="gelu" ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : List[str]=0.0 ,lowerCamelCase__ : List[str]=0.1 ,lowerCamelCase__ : List[str]=0.1 ,lowerCamelCase__ : Union[str, Any]=0.0_2 ,lowerCamelCase__ : int=1e-5 ,lowerCamelCase__ : Tuple="gelu" ,lowerCamelCase__ : Optional[int]=(512, 512, 512, 512, 512, 512, 512) ,lowerCamelCase__ : List[str]=(5, 2, 2, 2, 2, 2, 2) ,lowerCamelCase__ : str=(10, 3, 3, 3, 3, 2, 2) ,lowerCamelCase__ : int=False ,lowerCamelCase__ : Any=16 ,lowerCamelCase__ : List[Any]=19 ,lowerCamelCase__ : Any=5 ,lowerCamelCase__ : Union[str, Any]=0.0_5 ,lowerCamelCase__ : str=10 ,lowerCamelCase__ : int=2 ,lowerCamelCase__ : List[Any]=0.0 ,lowerCamelCase__ : List[Any]=10 ,lowerCamelCase__ : Dict=0 ,lowerCamelCase__ : Optional[Any]="sum" ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : Any=False ,lowerCamelCase__ : int=256 ,lowerCamelCase__ : Any=(512, 512, 512, 512, 1_500) ,lowerCamelCase__ : Union[str, Any]=(5, 3, 3, 1, 1) ,lowerCamelCase__ : int=(1, 2, 3, 1, 1) ,lowerCamelCase__ : List[str]=512 ,lowerCamelCase__ : Dict=0 ,lowerCamelCase__ : Dict=1 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : Optional[Any]=False ,lowerCamelCase__ : Optional[int]=3 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : List[str]=3 ,lowerCamelCase__ : int=None ,**lowerCamelCase__ : int ,):
super().__init__(**lowerCamelCase__ ,pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ )
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = feat_extract_activation
UpperCAmelCase__ = list(lowerCamelCase__ )
UpperCAmelCase__ = list(lowerCamelCase__ )
UpperCAmelCase__ = list(lowerCamelCase__ )
UpperCAmelCase__ = conv_bias
UpperCAmelCase__ = num_conv_pos_embeddings
UpperCAmelCase__ = num_conv_pos_embedding_groups
UpperCAmelCase__ = conv_pos_kernel_size
UpperCAmelCase__ = len(self.conv_dim )
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = feat_proj_dropout
UpperCAmelCase__ = final_dropout
UpperCAmelCase__ = layerdrop
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase__ = mask_time_prob
UpperCAmelCase__ = mask_time_length
UpperCAmelCase__ = mask_time_min_masks
UpperCAmelCase__ = mask_feature_prob
UpperCAmelCase__ = mask_feature_length
UpperCAmelCase__ = mask_feature_min_masks
# ctc loss
UpperCAmelCase__ = ctc_loss_reduction
UpperCAmelCase__ = ctc_zero_infinity
# adapter
UpperCAmelCase__ = add_adapter
UpperCAmelCase__ = adapter_kernel_size
UpperCAmelCase__ = adapter_stride
UpperCAmelCase__ = num_adapter_layers
UpperCAmelCase__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase__ = list(lowerCamelCase__ )
UpperCAmelCase__ = list(lowerCamelCase__ )
UpperCAmelCase__ = list(lowerCamelCase__ )
UpperCAmelCase__ = xvector_output_dim
@property
def __lowerCAmelCase ( self : Optional[Any] ):
return math.prod(self.conv_stride )
| 98 | """simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase__ : Optional[Any] = logging.getLogger()
def a_ ( ):
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('-f' )
UpperCAmelCase__ = parser.parse_args()
return args.f
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
UpperCAmelCase__ = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 ,'run_glue_deebert.py' )
with patch.object(lowerCamelCase__ ,'argv' ,lowerCamelCase__ ):
UpperCAmelCase__ = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCamelCase__ ,0.6_6_6 )
@slow
@require_torch_non_multi_gpu
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
self.run_and_check(lowerCamelCase__ )
UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCamelCase__ )
UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCamelCase__ )
| 98 | 1 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase__ : Optional[int] = Lock()
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(lowerCamelCase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
UpperCAmelCase__ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
UpperCAmelCase__ = min(lowerCamelCase , lowerCamelCase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(lowerCamelCase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
UpperCAmelCase__ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
UpperCAmelCase__ = max(lowerCamelCase , lowerCamelCase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(lowerCamelCase )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = []
UpperCAmelCase__ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
UpperCAmelCase__ = Pipe()
UpperCAmelCase__ = Pipe()
process_array_.append(
Process(
target=lowerCamelCase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
UpperCAmelCase__ = temp_rs
UpperCAmelCase__ = temp_rr
for i in range(1 , len(lowerCamelCase ) - 1 ):
UpperCAmelCase__ = Pipe()
UpperCAmelCase__ = Pipe()
process_array_.append(
Process(
target=lowerCamelCase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
UpperCAmelCase__ = temp_rs
UpperCAmelCase__ = temp_rr
process_array_.append(
Process(
target=lowerCamelCase , args=(
len(lowerCamelCase ) - 1,
arr[len(lowerCamelCase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(lowerCamelCase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(lowerCamelCase ) ):
UpperCAmelCase__ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def a_ ( ):
UpperCAmelCase__ = list(range(1_0 , 0 , -1 ) )
print('Initial List' )
print(*lowerCamelCase )
UpperCAmelCase__ = odd_even_transposition(lowerCamelCase )
print('Sorted List\n' )
print(*lowerCamelCase )
if __name__ == "__main__":
main()
| 98 | """simple docstring"""
import argparse
lowerCAmelCase__ : List[str] = 'docs/source/_static/js/custom.js'
def a_ ( lowerCamelCase ):
with open(lowerCamelCase , encoding='utf-8' , newline='\n' ) as f:
UpperCAmelCase__ = f.readlines()
UpperCAmelCase__ = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
UpperCAmelCase__ = f'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += f''' "v{version}": "v{version}",\n'''
with open(lowerCamelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--version', help='Release version.')
lowerCAmelCase__ : Optional[int] = parser.parse_args()
update_custom_js(args.version)
| 98 | 1 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(lowerCamelCase , max_perimeter + 1 ):
UpperCAmelCase__ = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(lowerCamelCase ):
UpperCAmelCase__ = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def a_ ( lowerCamelCase = 1_0_0_0 ):
UpperCAmelCase__ = pythagorean_triple(lowerCamelCase )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(F"""Perimeter {solution()} has maximum solutions""")
| 98 | """simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def a_ ( lowerCamelCase ):
return np.dot(lowerCamelCase , lowerCamelCase )
class snake_case :
"""simple docstring"""
def __init__( self : int ,*,
lowerCamelCase__ : float = np.inf ,lowerCamelCase__ : str = "linear" ,lowerCamelCase__ : float = 0.0 ,):
UpperCAmelCase__ = regularization
UpperCAmelCase__ = gamma
if kernel == "linear":
UpperCAmelCase__ = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma' )
if not isinstance(self.gamma ,(float, int) ):
raise ValueError('gamma must be float or int' )
if not self.gamma > 0:
raise ValueError('gamma must be > 0' )
UpperCAmelCase__ = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
UpperCAmelCase__ = f'''Unknown kernel: {kernel}'''
raise ValueError(lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : ndarray ,lowerCamelCase__ : ndarray ):
return np.dot(lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : ndarray ,lowerCamelCase__ : ndarray ):
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : list[ndarray] ,lowerCamelCase__ : ndarray ):
UpperCAmelCase__ = observations
UpperCAmelCase__ = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((UpperCAmelCase__) , ) = np.shape(lowerCamelCase__ )
def to_minimize(lowerCamelCase__ : ndarray ) -> float:
UpperCAmelCase__ = 0
((UpperCAmelCase__) , ) = np.shape(lowerCamelCase__ )
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] ,observations[j] )
)
return 1 / 2 * s - sum(lowerCamelCase__ )
UpperCAmelCase__ = LinearConstraint(lowerCamelCase__ ,0 ,0 )
UpperCAmelCase__ = Bounds(0 ,self.regularization )
UpperCAmelCase__ = minimize(
lowerCamelCase__ ,np.ones(lowerCamelCase__ ) ,bounds=lowerCamelCase__ ,constraints=[ly_contraint] ).x
UpperCAmelCase__ = l_star
# calculating mean offset of separation plane to points
UpperCAmelCase__ = 0
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] ,observations[j] )
UpperCAmelCase__ = s / n
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : ndarray ):
UpperCAmelCase__ = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] ,lowerCamelCase__ )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCAmelCase__ : Dict = '<<<<<<< This should probably be modified because it mentions: '
lowerCAmelCase__ : int = '=======\n>>>>>>>\n'
lowerCAmelCase__ : Optional[int] = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
lowerCAmelCase__ : Dict = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def a_ ( lowerCamelCase ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
@staticmethod
def __lowerCAmelCase ( lowerCamelCase__ : ArgumentParser ):
UpperCAmelCase__ = parser.add_parser(
'convert' ,help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' ,)
train_parser.add_argument(
'--tfds_path' ,type=lowerCamelCase__ ,required=lowerCamelCase__ ,help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' ,)
train_parser.add_argument(
'--datasets_directory' ,type=lowerCamelCase__ ,required=lowerCamelCase__ ,help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self : List[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : str ,*lowerCamelCase__ : Union[str, Any] ):
UpperCAmelCase__ = get_logger('datasets-cli/converting' )
UpperCAmelCase__ = tfds_path
UpperCAmelCase__ = datasets_directory
def __lowerCAmelCase ( self : List[Any] ):
if os.path.isdir(self._tfds_path ):
UpperCAmelCase__ = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
UpperCAmelCase__ = os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
UpperCAmelCase__ = os.path.abspath(self._datasets_directory )
self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
UpperCAmelCase__ = []
UpperCAmelCase__ = []
UpperCAmelCase__ = {}
if os.path.isdir(self._tfds_path ):
UpperCAmelCase__ = os.listdir(lowerCamelCase__ )
else:
UpperCAmelCase__ = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'''Looking at file {f_name}''' )
UpperCAmelCase__ = os.path.join(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = os.path.join(lowerCamelCase__ ,lowerCamelCase__ )
if not os.path.isfile(lowerCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(lowerCamelCase__ ,encoding='utf-8' ) as f:
UpperCAmelCase__ = f.readlines()
UpperCAmelCase__ = []
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = []
for line in lines:
UpperCAmelCase__ = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
UpperCAmelCase__ = 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
UpperCAmelCase__ = ''
continue
elif "from absl import logging" in out_line:
UpperCAmelCase__ = 'from datasets import logging\n'
elif "getLogger" in out_line:
UpperCAmelCase__ = out_line.replace('getLogger' ,'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
UpperCAmelCase__ = True
UpperCAmelCase__ = list(filter(lambda lowerCamelCase__ : e in out_line ,lowerCamelCase__ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase__ ) + '\n' )
out_lines.append(lowerCamelCase__ )
out_lines.append(lowerCamelCase__ )
continue
else:
for pattern, replacement in TO_CONVERT:
UpperCAmelCase__ = re.sub(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
UpperCAmelCase__ = re.match(R'from\stensorflow_datasets.*import\s([^\.\r\n]+)' ,lowerCamelCase__ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
UpperCAmelCase__ = 'from . import ' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
UpperCAmelCase__ = True
out_lines.append(lowerCamelCase__ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
UpperCAmelCase__ = f_name.replace('.py' ,'' )
UpperCAmelCase__ = os.path.join(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = os.path.join(lowerCamelCase__ ,lowerCamelCase__ )
os.makedirs(lowerCamelCase__ ,exist_ok=lowerCamelCase__ )
self._logger.info(f'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCamelCase__ )
if needs_manual_update:
with_manual_update.append(lowerCamelCase__ )
with open(lowerCamelCase__ ,'w' ,encoding='utf-8' ) as f:
f.writelines(lowerCamelCase__ )
self._logger.info(f'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
UpperCAmelCase__ = os.path.basename(lowerCamelCase__ )
UpperCAmelCase__ = imports_to_builder_map[f_name.replace('.py' ,'' )]
self._logger.info(f'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(lowerCamelCase__ ,lowerCamelCase__ )
except KeyError:
self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 98 | """simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCAmelCase__ : List[Any] = '\\n\n'
lowerCAmelCase__ : Tuple = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
lowerCAmelCase__ : str = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def __lowerCAmelCase ( self : str ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) ,reference_urls=['https://huggingface.co/docs/transformers/perplexity'] ,)
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : List[str]=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
UpperCAmelCase__ = 'cuda'
else:
UpperCAmelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = model.to(lowerCamelCase__ )
UpperCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase__ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(lowerCamelCase__ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
UpperCAmelCase__ = model.config.max_length - 1
else:
UpperCAmelCase__ = model.config.max_length
UpperCAmelCase__ = tokenizer(
lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors='pt' ,return_attention_mask=lowerCamelCase__ ,).to(lowerCamelCase__ )
UpperCAmelCase__ = encodings['input_ids']
UpperCAmelCase__ = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) ,1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) ,2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
UpperCAmelCase__ = []
UpperCAmelCase__ = CrossEntropyLoss(reduction='none' )
for start_index in logging.tqdm(range(0 ,len(lowerCamelCase__ ) ,lowerCamelCase__ ) ):
UpperCAmelCase__ = min(start_index + batch_size ,len(lowerCamelCase__ ) )
UpperCAmelCase__ = encoded_texts[start_index:end_index]
UpperCAmelCase__ = attn_masks[start_index:end_index]
if add_start_token:
UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowerCamelCase__ )
UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] ,dim=1 )
UpperCAmelCase__ = torch.cat(
[torch.ones(bos_tokens_tensor.size() ,dtype=torch.intaa ).to(lowerCamelCase__ ), attn_mask] ,dim=1 )
UpperCAmelCase__ = encoded_batch
with torch.no_grad():
UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ).logits
UpperCAmelCase__ = out_logits[..., :-1, :].contiguous()
UpperCAmelCase__ = labels[..., 1:].contiguous()
UpperCAmelCase__ = attn_mask[..., 1:].contiguous()
UpperCAmelCase__ = torch.expa(
(loss_fct(shift_logits.transpose(1 ,2 ) ,lowerCamelCase__ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(lowerCamelCase__ )}
| 98 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
lowerCAmelCase__ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Any ,lowerCamelCase__ : int ,lowerCamelCase__ : List[str] ):
super().__init__()
self.register_modules(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ )
@torch.no_grad()
def __call__( self : Any ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : int = 100 ,lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowerCamelCase__ : Optional[float] = None ,lowerCamelCase__ : bool = True ,):
if audio_length_in_s is None:
UpperCAmelCase__ = self.unet.config.sample_size / self.unet.config.sample_rate
UpperCAmelCase__ = audio_length_in_s * self.unet.config.sample_rate
UpperCAmelCase__ = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
UpperCAmelCase__ = int(lowerCamelCase__ )
if sample_size % down_scale_factor != 0:
UpperCAmelCase__ = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
UpperCAmelCase__ = int(lowerCamelCase__ )
UpperCAmelCase__ = next(iter(self.unet.parameters() ) ).dtype
UpperCAmelCase__ = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) and len(lowerCamelCase__ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase__ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
UpperCAmelCase__ = randn_tensor(lowerCamelCase__ ,generator=lowerCamelCase__ ,device=self.device ,dtype=lowerCamelCase__ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase__ ,device=audio.device )
UpperCAmelCase__ = self.scheduler.timesteps.to(lowerCamelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase__ = self.unet(lowerCamelCase__ ,lowerCamelCase__ ).sample
# 2. compute previous image: x_t -> t_t-1
UpperCAmelCase__ = self.scheduler.step(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = audio.clamp(-1 ,1 ).float().cpu().numpy()
UpperCAmelCase__ = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase__ )
| 98 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : int = logging.get_logger(__name__)
lowerCAmelCase__ : str = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "xglm"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : Any ,lowerCamelCase__ : Any=256_008 ,lowerCamelCase__ : Optional[Any]=2_048 ,lowerCamelCase__ : List[str]=1_024 ,lowerCamelCase__ : List[str]=4_096 ,lowerCamelCase__ : Tuple=24 ,lowerCamelCase__ : Optional[int]=16 ,lowerCamelCase__ : int="gelu" ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : List[Any]=0.0 ,lowerCamelCase__ : List[str]=0.0 ,lowerCamelCase__ : Optional[Any]=0.0_2 ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=2 ,lowerCamelCase__ : Dict=1 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Tuple=2 ,**lowerCamelCase__ : List[Any] ,):
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = d_model
UpperCAmelCase__ = ffn_dim
UpperCAmelCase__ = num_layers
UpperCAmelCase__ = attention_heads
UpperCAmelCase__ = activation_function
UpperCAmelCase__ = dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = layerdrop
UpperCAmelCase__ = init_std
UpperCAmelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase__ = use_cache
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,decoder_start_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
| 98 | 1 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = int(np.ceil((x_end - xa) / step_size ) )
UpperCAmelCase__ = np.zeros((n + 1,) )
UpperCAmelCase__ = ya
UpperCAmelCase__ = xa
for k in range(lowerCamelCase ):
UpperCAmelCase__ = y[k] + step_size * ode_func(lowerCamelCase , y[k] )
UpperCAmelCase__ = y[k] + (
(step_size / 2) * (ode_func(lowerCamelCase , y[k] ) + ode_func(x + step_size , lowerCamelCase ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | """simple docstring"""
import math
def a_ ( lowerCamelCase , lowerCamelCase ):
if (
not isinstance(lowerCamelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * power_factor
def a_ ( lowerCamelCase , lowerCamelCase ):
if (
not isinstance(lowerCamelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = BeautifulSoup(requests.get(lowerCamelCase , params=lowerCamelCase ).content , 'html.parser' )
UpperCAmelCase__ = soup.find('div' , attrs={'class': 'gs_ri'} )
UpperCAmelCase__ = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
return anchors[2].get_text()
if __name__ == "__main__":
lowerCAmelCase__ : Optional[int] = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 98 | """simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
lowerCAmelCase__ : Optional[int] = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
lowerCAmelCase__ : str = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
lowerCAmelCase__ : int = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
lowerCAmelCase__ : Dict = F"""down_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : Union[str, Any] = F"""input_blocks.{3*i + j + 1}.0."""
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
lowerCAmelCase__ : Tuple = F"""down_blocks.{i}.attentions.{j}."""
lowerCAmelCase__ : int = F"""input_blocks.{3*i + j + 1}.1."""
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
lowerCAmelCase__ : Tuple = F"""up_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : int = F"""output_blocks.{3*i + j}.0."""
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
lowerCAmelCase__ : Any = F"""up_blocks.{i}.attentions.{j}."""
lowerCAmelCase__ : List[str] = F"""output_blocks.{3*i + j}.1."""
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
lowerCAmelCase__ : List[str] = F"""down_blocks.{i}.downsamplers.0.conv."""
lowerCAmelCase__ : Union[str, Any] = F"""input_blocks.{3*(i+1)}.0.op."""
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
lowerCAmelCase__ : Dict = F"""up_blocks.{i}.upsamplers.0."""
lowerCAmelCase__ : List[Any] = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
lowerCAmelCase__ : str = 'mid_block.attentions.0.'
lowerCAmelCase__ : Union[str, Any] = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
lowerCAmelCase__ : int = F"""mid_block.resnets.{j}."""
lowerCAmelCase__ : List[str] = F"""middle_block.{2*j}."""
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def a_ ( lowerCamelCase ):
# buyer beware: this is a *brittle* function,
# and correct output requires that all of these pieces interact in
# the exact order in which I have arranged them.
UpperCAmelCase__ = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
UpperCAmelCase__ = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
UpperCAmelCase__ = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
lowerCAmelCase__ : str = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
lowerCAmelCase__ : List[str] = F"""encoder.down_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : List[Any] = F"""encoder.down.{i}.block.{j}."""
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
lowerCAmelCase__ : Dict = F"""down_blocks.{i}.downsamplers.0."""
lowerCAmelCase__ : str = F"""down.{i}.downsample."""
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
lowerCAmelCase__ : int = F"""up_blocks.{i}.upsamplers.0."""
lowerCAmelCase__ : str = F"""up.{3-i}.upsample."""
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
lowerCAmelCase__ : Dict = F"""decoder.up_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : Optional[int] = F"""decoder.up.{3-i}.block.{j}."""
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
lowerCAmelCase__ : Any = F"""mid_block.resnets.{i}."""
lowerCAmelCase__ : Any = F"""mid.block_{i+1}."""
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
lowerCAmelCase__ : str = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def a_ ( lowerCamelCase ):
# convert HF linear weights to SD conv2d weights
return w.reshape(*w.shape , 1 , 1 )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
UpperCAmelCase__ = {v: vae_state_dict[k] for k, v in mapping.items()}
UpperCAmelCase__ = ['q', 'k', 'v', 'proj_out']
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f'''mid.attn_1.{weight_name}.weight''' in k:
print(f'''Reshaping {k} for SD format''' )
UpperCAmelCase__ = reshape_weight_for_sd(lowerCamelCase )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
lowerCAmelCase__ : Optional[int] = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
lowerCAmelCase__ : List[Any] = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCAmelCase__ : int = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCAmelCase__ : Optional[int] = {'q': 0, 'k': 1, 'v': 2}
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
for k, v in text_enc_dict.items():
if (
k.endswith('.self_attn.q_proj.weight' )
or k.endswith('.self_attn.k_proj.weight' )
or k.endswith('.self_attn.v_proj.weight' )
):
UpperCAmelCase__ = k[: -len('.q_proj.weight' )]
UpperCAmelCase__ = k[-len('q_proj.weight' )]
if k_pre not in capture_qkv_weight:
UpperCAmelCase__ = [None, None, None]
UpperCAmelCase__ = v
continue
if (
k.endswith('.self_attn.q_proj.bias' )
or k.endswith('.self_attn.k_proj.bias' )
or k.endswith('.self_attn.v_proj.bias' )
):
UpperCAmelCase__ = k[: -len('.q_proj.bias' )]
UpperCAmelCase__ = k[-len('q_proj.bias' )]
if k_pre not in capture_qkv_bias:
UpperCAmelCase__ = [None, None, None]
UpperCAmelCase__ = v
continue
UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
UpperCAmelCase__ = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
UpperCAmelCase__ = torch.cat(lowerCamelCase )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
UpperCAmelCase__ = torch.cat(lowerCamelCase )
return new_state_dict
def a_ ( lowerCamelCase ):
return text_enc_dict
if __name__ == "__main__":
lowerCAmelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
lowerCAmelCase__ : Optional[int] = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCAmelCase__ : Tuple = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
lowerCAmelCase__ : List[str] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
lowerCAmelCase__ : int = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
lowerCAmelCase__ : Union[str, Any] = load_file(unet_path, device='cpu')
else:
lowerCAmelCase__ : str = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
lowerCAmelCase__ : Dict = torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
lowerCAmelCase__ : Optional[Any] = load_file(vae_path, device='cpu')
else:
lowerCAmelCase__ : Optional[int] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
lowerCAmelCase__ : List[str] = torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
lowerCAmelCase__ : Tuple = load_file(text_enc_path, device='cpu')
else:
lowerCAmelCase__ : Any = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
lowerCAmelCase__ : Any = torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
lowerCAmelCase__ : Any = convert_unet_state_dict(unet_state_dict)
lowerCAmelCase__ : Dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCAmelCase__ : List[Any] = convert_vae_state_dict(vae_state_dict)
lowerCAmelCase__ : str = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCAmelCase__ : List[Any] = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCAmelCase__ : Tuple = {'transformer.' + k: v for k, v in text_enc_dict.items()}
lowerCAmelCase__ : List[str] = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCAmelCase__ : str = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
lowerCAmelCase__ : Optional[Any] = convert_text_enc_state_dict(text_enc_dict)
lowerCAmelCase__ : Optional[Any] = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCAmelCase__ : List[Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCAmelCase__ : int = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCAmelCase__ : List[str] = {'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 98 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = StableDiffusionInpaintPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
snake_case__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
snake_case__ = frozenset([] )
def __lowerCAmelCase ( self : str ):
torch.manual_seed(0 )
UpperCAmelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=9 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,attention_head_dim=(2, 4) ,use_linear_projection=lowerCamelCase__ ,)
UpperCAmelCase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
torch.manual_seed(0 )
UpperCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
UpperCAmelCase__ = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,hidden_act='gelu' ,projection_dim=512 ,)
UpperCAmelCase__ = CLIPTextModel(lowerCamelCase__ )
UpperCAmelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCAmelCase__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : int=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCAmelCase__ = floats_tensor((1, 3, 32, 32) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
UpperCAmelCase__ = image.cpu().permute(0 ,2 ,3 ,1 )[0]
UpperCAmelCase__ = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert('RGB' ).resize((64, 64) )
UpperCAmelCase__ = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((64, 64) )
if str(lowerCamelCase__ ).startswith('mps' ):
UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ )
else:
UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
UpperCAmelCase__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = StableDiffusionInpaintPipeline(**lowerCamelCase__ )
UpperCAmelCase__ = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = self.get_dummy_inputs(lowerCamelCase__ )
UpperCAmelCase__ = sd_pipe(**lowerCamelCase__ ).images
UpperCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Optional[int] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
UpperCAmelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
UpperCAmelCase__ = 'stabilityai/stable-diffusion-2-inpainting'
UpperCAmelCase__ = StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase__ ,safety_checker=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
UpperCAmelCase__ = 'Face of a yellow cat, high resolution, sitting on a park bench'
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = pipe(
prompt=lowerCamelCase__ ,image=lowerCamelCase__ ,mask_image=lowerCamelCase__ ,generator=lowerCamelCase__ ,output_type='np' ,)
UpperCAmelCase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
UpperCAmelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
UpperCAmelCase__ = 'stabilityai/stable-diffusion-2-inpainting'
UpperCAmelCase__ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase__ ,torch_dtype=torch.floataa ,safety_checker=lowerCamelCase__ ,)
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
UpperCAmelCase__ = 'Face of a yellow cat, high resolution, sitting on a park bench'
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = pipe(
prompt=lowerCamelCase__ ,image=lowerCamelCase__ ,mask_image=lowerCamelCase__ ,generator=lowerCamelCase__ ,output_type='np' ,)
UpperCAmelCase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def __lowerCAmelCase ( self : List[str] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
UpperCAmelCase__ = 'stabilityai/stable-diffusion-2-inpainting'
UpperCAmelCase__ = PNDMScheduler.from_pretrained(lowerCamelCase__ ,subfolder='scheduler' )
UpperCAmelCase__ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase__ ,safety_checker=lowerCamelCase__ ,scheduler=lowerCamelCase__ ,torch_dtype=torch.floataa ,)
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase__ = 'Face of a yellow cat, high resolution, sitting on a park bench'
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = pipe(
prompt=lowerCamelCase__ ,image=lowerCamelCase__ ,mask_image=lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=2 ,output_type='np' ,)
UpperCAmelCase__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9
| 98 | """simple docstring"""
def a_ ( lowerCamelCase , lowerCamelCase ):
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
UpperCAmelCase__ = str(bin(lowerCamelCase ) )[2:] # remove the leading "0b"
UpperCAmelCase__ = str(bin(lowerCamelCase ) )[2:] # remove the leading "0b"
UpperCAmelCase__ = max(len(lowerCamelCase ) , len(lowerCamelCase ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(lowerCamelCase ) , b_binary.zfill(lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ["image_processor", "tokenizer"]
snake_case__ = "BlipImageProcessor"
snake_case__ = "AutoTokenizer"
def __init__( self : Dict ,lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[Any] ):
UpperCAmelCase__ = False
super().__init__(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = self.image_processor
def __call__( self : str ,lowerCamelCase__ : ImageInput = None ,lowerCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False ,lowerCamelCase__ : Union[bool, str, TruncationStrategy] = None ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : int = 0 ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,**lowerCamelCase__ : Optional[int] ,):
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
UpperCAmelCase__ = self.tokenizer
UpperCAmelCase__ = self.tokenizer(
text=lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,stride=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,return_overflowing_tokens=lowerCamelCase__ ,return_special_tokens_mask=lowerCamelCase__ ,return_offsets_mapping=lowerCamelCase__ ,return_token_type_ids=lowerCamelCase__ ,return_length=lowerCamelCase__ ,verbose=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ ,)
return text_encoding
# add pixel_values
UpperCAmelCase__ = self.image_processor(lowerCamelCase__ ,return_tensors=lowerCamelCase__ )
if text is not None:
UpperCAmelCase__ = self.tokenizer(
text=lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,stride=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,return_overflowing_tokens=lowerCamelCase__ ,return_special_tokens_mask=lowerCamelCase__ ,return_offsets_mapping=lowerCamelCase__ ,return_token_type_ids=lowerCamelCase__ ,return_length=lowerCamelCase__ ,verbose=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ ,)
else:
UpperCAmelCase__ = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase__ )
return encoding_image_processor
def __lowerCAmelCase ( self : Any ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : Optional[Any] ):
return self.tokenizer.batch_decode(*lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Dict ):
return self.tokenizer.decode(*lowerCamelCase__ ,**lowerCamelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __lowerCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase__ = self.tokenizer.model_input_names
UpperCAmelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 98 | """simple docstring"""
import requests
from bsa import BeautifulSoup
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = BeautifulSoup(requests.get(lowerCamelCase , params=lowerCamelCase ).content , 'html.parser' )
UpperCAmelCase__ = soup.find('div' , attrs={'class': 'gs_ri'} )
UpperCAmelCase__ = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
return anchors[2].get_text()
if __name__ == "__main__":
lowerCAmelCase__ : Optional[int] = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 98 | 1 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def a_ ( lowerCamelCase ):
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | """simple docstring"""
def a_ ( lowerCamelCase ):
return str(lowerCamelCase ) == str(lowerCamelCase )[::-1]
def a_ ( lowerCamelCase ):
return int(lowerCamelCase ) + int(str(lowerCamelCase )[::-1] )
def a_ ( lowerCamelCase = 1_0_0_0_0 ):
UpperCAmelCase__ = []
for num in range(1 , lowerCamelCase ):
UpperCAmelCase__ = 0
UpperCAmelCase__ = num
while iterations < 5_0:
UpperCAmelCase__ = sum_reverse(lowerCamelCase )
iterations += 1
if is_palindrome(lowerCamelCase ):
break
else:
lychrel_nums.append(lowerCamelCase )
return len(lowerCamelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 98 | 1 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class snake_case :
"""simple docstring"""
@staticmethod
def __lowerCAmelCase ( *lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Union[str, Any] ):
pass
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = hashlib.mda(image.tobytes() )
return m.hexdigest()[:1_0]
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = np.array(lowerCamelCase )
UpperCAmelCase__ = npimg.shape
return {"hash": hashimage(lowerCamelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
snake_case__ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
snake_case__ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : str ):
UpperCAmelCase__ = MaskGenerationPipeline(model=lowerCamelCase__ ,image_processor=lowerCamelCase__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : str ):
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def __lowerCAmelCase ( self : Optional[Any] ):
pass
@slow
@require_torch
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = pipeline('mask-generation' ,model='facebook/sam-vit-huge' )
UpperCAmelCase__ = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' ,points_per_batch=256 )
# Shortening by hashing
UpperCAmelCase__ = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(lowerCamelCase__ ,decimals=4 ) ,[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] ,)
# fmt: on
@require_torch
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = 'facebook/sam-vit-huge'
UpperCAmelCase__ = pipeline('mask-generation' ,model=lowerCamelCase__ )
UpperCAmelCase__ = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' ,pred_iou_thresh=1 ,points_per_batch=256 )
# Shortening by hashing
UpperCAmelCase__ = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(lowerCamelCase__ ,decimals=4 ) ,[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] ,)
| 98 | """simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ : str = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Any = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCAmelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
def a_ ( lowerCamelCase , lowerCamelCase ):
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
UpperCAmelCase__ = str(bin(lowerCamelCase ) )[2:] # remove the leading "0b"
UpperCAmelCase__ = str(bin(lowerCamelCase ) )[2:] # remove the leading "0b"
UpperCAmelCase__ = max(len(lowerCamelCase ) , len(lowerCamelCase ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(lowerCamelCase ) , b_binary.zfill(lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | """simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class snake_case :
"""simple docstring"""
@staticmethod
def __lowerCAmelCase ( *lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Union[str, Any] ):
pass
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = hashlib.mda(image.tobytes() )
return m.hexdigest()[:1_0]
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = np.array(lowerCamelCase )
UpperCAmelCase__ = npimg.shape
return {"hash": hashimage(lowerCamelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
snake_case__ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
snake_case__ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : str ):
UpperCAmelCase__ = MaskGenerationPipeline(model=lowerCamelCase__ ,image_processor=lowerCamelCase__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : str ):
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def __lowerCAmelCase ( self : Optional[Any] ):
pass
@slow
@require_torch
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = pipeline('mask-generation' ,model='facebook/sam-vit-huge' )
UpperCAmelCase__ = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' ,points_per_batch=256 )
# Shortening by hashing
UpperCAmelCase__ = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(lowerCamelCase__ ,decimals=4 ) ,[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] ,)
# fmt: on
@require_torch
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = 'facebook/sam-vit-huge'
UpperCAmelCase__ = pipeline('mask-generation' ,model=lowerCamelCase__ )
UpperCAmelCase__ = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' ,pred_iou_thresh=1 ,points_per_batch=256 )
# Shortening by hashing
UpperCAmelCase__ = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(lowerCamelCase__ ,decimals=4 ) ,[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] ,)
| 98 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase__ : int = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[str] = ['GLPNFeatureExtractor']
lowerCAmelCase__ : Any = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Optional[int] = [
'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
'GLPNForDepthEstimation',
'GLPNLayer',
'GLPNModel',
'GLPNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | """simple docstring"""
import functools
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = len(lowerCamelCase )
UpperCAmelCase__ = len(lowerCamelCase )
@functools.cache
def min_distance(lowerCamelCase , lowerCamelCase ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
UpperCAmelCase__ = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , lowerCamelCase ) , 1 + min_distance(lowerCamelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = generate_pascal_triangle(lowerCamelCase )
for row_idx in range(lowerCamelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def a_ ( lowerCamelCase ):
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
UpperCAmelCase__ = []
for current_row_idx in range(lowerCamelCase ):
UpperCAmelCase__ = populate_current_row(lowerCamelCase , lowerCamelCase )
triangle.append(lowerCamelCase )
return triangle
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
UpperCAmelCase__ , UpperCAmelCase__ = 1, 1
for current_col_idx in range(1 , lowerCamelCase ):
calculate_current_element(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
return current_row
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
UpperCAmelCase__ = triangle[current_row_idx - 1][current_col_idx - 1]
UpperCAmelCase__ = triangle[current_row_idx - 1][current_col_idx]
UpperCAmelCase__ = above_to_left_elt + above_to_right_elt
def a_ ( lowerCamelCase ):
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
UpperCAmelCase__ = [[1]]
for row_index in range(1 , lowerCamelCase ):
UpperCAmelCase__ = [0] + result[-1] + [0]
UpperCAmelCase__ = row_index + 1
# Calculate the number of distinct elements in a row
UpperCAmelCase__ = sum(divmod(lowerCamelCase , 2 ) )
UpperCAmelCase__ = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
UpperCAmelCase__ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
UpperCAmelCase__ = row_first_half + row_second_half
result.append(lowerCamelCase )
return result
def a_ ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCamelCase , lowerCamelCase ) -> None:
UpperCAmelCase__ = f'''{func.__name__}({value})'''
UpperCAmelCase__ = timeit(f'''__main__.{call}''' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f'''{call:38} -- {timing:.4f} seconds''' )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowerCamelCase , lowerCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 98 | """simple docstring"""
from PIL import Image
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))
def contrast(lowerCamelCase ) -> int:
return int(1_2_8 + factor * (c - 1_2_8) )
return img.point(lowerCamelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
lowerCAmelCase__ : Any = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 98 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
lowerCAmelCase__ : int = random.Random()
def a_ ( lowerCamelCase , lowerCamelCase=1.0 , lowerCamelCase=None , lowerCamelCase=None ):
if rng is None:
UpperCAmelCase__ = global_rng
UpperCAmelCase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[Any]=7 ,lowerCamelCase__ : Dict=400 ,lowerCamelCase__ : str=2_000 ,lowerCamelCase__ : int=24 ,lowerCamelCase__ : List[Any]=24 ,lowerCamelCase__ : List[Any]=0.0 ,lowerCamelCase__ : Dict=16_000 ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : Dict=True ,):
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = min_seq_length
UpperCAmelCase__ = max_seq_length
UpperCAmelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCAmelCase__ = feature_size
UpperCAmelCase__ = num_mel_bins
UpperCAmelCase__ = padding_value
UpperCAmelCase__ = sampling_rate
UpperCAmelCase__ = return_attention_mask
UpperCAmelCase__ = do_normalize
def __lowerCAmelCase ( self : List[str] ):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : Union[str, Any]=False ,lowerCamelCase__ : Optional[int]=False ):
def _flatten(lowerCamelCase__ : List[str] ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
UpperCAmelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCAmelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
UpperCAmelCase__ = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = SpeechaTextFeatureExtractor if is_speech_available() else None
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = SpeechaTextFeatureExtractionTester(self )
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Dict ):
self.assertTrue(np.all(np.mean(lowerCamelCase__ ,axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ,axis=0 ) - 1 ) < 1e-3 ) )
def __lowerCAmelCase ( self : int ):
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase__ = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
UpperCAmelCase__ = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
UpperCAmelCase__ = feature_extractor(lowerCamelCase__ ,padding=lowerCamelCase__ ,return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCAmelCase__ = feature_extractor(speech_inputs[0] ,return_tensors='np' ).input_features
UpperCAmelCase__ = feature_extractor(np_speech_inputs[0] ,return_tensors='np' ).input_features
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1e-3 ) )
# Test batched
UpperCAmelCase__ = feature_extractor(lowerCamelCase__ ,return_tensors='np' ).input_features
UpperCAmelCase__ = feature_extractor(lowerCamelCase__ ,return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ ,lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCAmelCase__ = np.asarray(lowerCamelCase__ )
UpperCAmelCase__ = feature_extractor(lowerCamelCase__ ,return_tensors='np' ).input_features
UpperCAmelCase__ = feature_extractor(lowerCamelCase__ ,return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ ,lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1e-3 ) )
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase__ = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
UpperCAmelCase__ = ['longest', 'max_length', 'do_not_pad']
UpperCAmelCase__ = [None, 16, None]
for max_length, padding in zip(lowerCamelCase__ ,lowerCamelCase__ ):
UpperCAmelCase__ = feature_extractor(
lowerCamelCase__ ,padding=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ )
UpperCAmelCase__ = inputs.input_features
UpperCAmelCase__ = inputs.attention_mask
UpperCAmelCase__ = [np.sum(lowerCamelCase__ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __lowerCAmelCase ( self : str ):
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase__ = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
UpperCAmelCase__ = ['longest', 'max_length', 'do_not_pad']
UpperCAmelCase__ = [None, 16, None]
for max_length, padding in zip(lowerCamelCase__ ,lowerCamelCase__ ):
UpperCAmelCase__ = feature_extractor(
lowerCamelCase__ ,max_length=lowerCamelCase__ ,padding=lowerCamelCase__ ,return_tensors='np' ,return_attention_mask=lowerCamelCase__ )
UpperCAmelCase__ = inputs.input_features
UpperCAmelCase__ = inputs.attention_mask
UpperCAmelCase__ = [np.sum(lowerCamelCase__ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase__ = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
UpperCAmelCase__ = feature_extractor(
lowerCamelCase__ ,padding='max_length' ,max_length=4 ,truncation=lowerCamelCase__ ,return_tensors='np' ,return_attention_mask=lowerCamelCase__ ,)
UpperCAmelCase__ = inputs.input_features
UpperCAmelCase__ = inputs.attention_mask
UpperCAmelCase__ = np.sum(attention_mask == 1 ,axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase__ = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
UpperCAmelCase__ = feature_extractor(
lowerCamelCase__ ,padding='longest' ,max_length=4 ,truncation=lowerCamelCase__ ,return_tensors='np' ,return_attention_mask=lowerCamelCase__ ,)
UpperCAmelCase__ = inputs.input_features
UpperCAmelCase__ = inputs.attention_mask
UpperCAmelCase__ = np.sum(attention_mask == 1 ,axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape ,(3, 4, 24) )
UpperCAmelCase__ = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
UpperCAmelCase__ = feature_extractor(
lowerCamelCase__ ,padding='longest' ,max_length=16 ,truncation=lowerCamelCase__ ,return_tensors='np' ,return_attention_mask=lowerCamelCase__ ,)
UpperCAmelCase__ = inputs.input_features
UpperCAmelCase__ = inputs.attention_mask
UpperCAmelCase__ = np.sum(attention_mask == 1 ,axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape ,(3, 6, 24) )
def __lowerCAmelCase ( self : Optional[Any] ):
import torch
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase__ = np.random.rand(100 ,32 ).astype(np.floataa )
UpperCAmelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCAmelCase__ = feature_extractor.pad([{'input_features': inputs}] ,return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCAmelCase__ = feature_extractor.pad([{'input_features': inputs}] ,return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : Tuple ):
from datasets import load_dataset
UpperCAmelCase__ = load_dataset('hf-internal-testing/librispeech_asr_dummy' ,'clean' ,split='validation' )
# automatic decoding with librispeech
UpperCAmelCase__ = ds.sort('id' ).select(range(lowerCamelCase__ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __lowerCAmelCase ( self : Optional[Any] ):
# fmt: off
UpperCAmelCase__ = np.array([
-1.5_7_4_5, -1.7_7_1_3, -1.7_0_2_0, -1.6_0_6_9, -1.2_2_5_0, -1.1_1_0_5, -0.9_0_7_2, -0.8_2_4_1,
-1.2_3_1_0, -0.8_0_9_8, -0.3_3_2_0, -0.4_1_0_1, -0.7_9_8_5, -0.4_9_9_6, -0.8_2_1_3, -0.9_1_2_8,
-1.0_4_2_0, -1.1_2_8_6, -1.0_4_4_0, -0.7_9_9_9, -0.8_4_0_5, -1.2_2_7_5, -1.5_4_4_3, -1.4_6_2_5,
] )
# fmt: on
UpperCAmelCase__ = self._load_datasamples(1 )
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase__ = feature_extractor(lowerCamelCase__ ,return_tensors='pt' ).input_features
self.assertEquals(input_features.shape ,(1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] ,lowerCamelCase__ ,atol=1e-4 ) )
| 98 | """simple docstring"""
import os
import sys
import unittest
lowerCAmelCase__ : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCAmelCase__ : Tuple = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
lowerCAmelCase__ : Optional[Any] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = get_test_to_tester_mapping(lowerCamelCase__ )
UpperCAmelCase__ = get_test_to_tester_mapping(lowerCamelCase__ )
UpperCAmelCase__ = {'BertModelTest': 'BertModelTester'}
UpperCAmelCase__ = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = get_model_to_test_mapping(lowerCamelCase__ )
UpperCAmelCase__ = get_model_to_test_mapping(lowerCamelCase__ )
UpperCAmelCase__ = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
UpperCAmelCase__ = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = get_model_to_tester_mapping(lowerCamelCase__ )
UpperCAmelCase__ = get_model_to_tester_mapping(lowerCamelCase__ )
UpperCAmelCase__ = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
UpperCAmelCase__ = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
| 98 | 1 |
"""simple docstring"""
import argparse
lowerCAmelCase__ : List[str] = 'docs/source/_static/js/custom.js'
def a_ ( lowerCamelCase ):
with open(lowerCamelCase , encoding='utf-8' , newline='\n' ) as f:
UpperCAmelCase__ = f.readlines()
UpperCAmelCase__ = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
UpperCAmelCase__ = f'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += f''' "v{version}": "v{version}",\n'''
with open(lowerCamelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--version', help='Release version.')
lowerCAmelCase__ : Optional[int] = parser.parse_args()
update_custom_js(args.version)
| 98 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ : str = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[str] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowerCAmelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCAmelCase__ : List[str] = logging.get_logger(__name__)
lowerCAmelCase__ : List[Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
lowerCAmelCase__ : Union[str, Any] = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for attribute in key.split('.' ):
UpperCAmelCase__ = getattr(lowerCamelCase , lowerCamelCase )
if weight_type is not None:
UpperCAmelCase__ = getattr(lowerCamelCase , lowerCamelCase ).shape
else:
UpperCAmelCase__ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
UpperCAmelCase__ = value
elif weight_type == "weight_g":
UpperCAmelCase__ = value
elif weight_type == "weight_v":
UpperCAmelCase__ = value
elif weight_type == "bias":
UpperCAmelCase__ = value
else:
UpperCAmelCase__ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = []
UpperCAmelCase__ = fairseq_model.state_dict()
UpperCAmelCase__ = hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , hf_model.config.feat_extract_norm == 'group' , )
UpperCAmelCase__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCAmelCase__ = True
if "*" in mapped_key:
UpperCAmelCase__ = name.split(lowerCamelCase )[0].split('.' )[-2]
UpperCAmelCase__ = mapped_key.replace('*' , lowerCamelCase )
if "weight_g" in name:
UpperCAmelCase__ = 'weight_g'
elif "weight_v" in name:
UpperCAmelCase__ = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
UpperCAmelCase__ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase__ = 'weight'
else:
UpperCAmelCase__ = None
set_recursively(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
continue
if not is_used:
unused_weights.append(lowerCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = full_name.split('conv_layers.' )[-1]
UpperCAmelCase__ = name.split('.' )
UpperCAmelCase__ = int(items[0] )
UpperCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
UpperCAmelCase__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
UpperCAmelCase__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
UpperCAmelCase__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
UpperCAmelCase__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCamelCase )
@torch.no_grad()
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=None ):
# load the pre-trained checkpoints
UpperCAmelCase__ = torch.load(lowerCamelCase )
UpperCAmelCase__ = WavLMConfigOrig(checkpoint['cfg'] )
UpperCAmelCase__ = WavLMOrig(lowerCamelCase )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
UpperCAmelCase__ = WavLMConfig.from_pretrained(lowerCamelCase )
else:
UpperCAmelCase__ = WavLMConfig()
UpperCAmelCase__ = WavLMModel(lowerCamelCase )
recursively_load_weights(lowerCamelCase , lowerCamelCase )
hf_wavlm.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
lowerCAmelCase__ : Optional[int] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 98 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ : Tuple = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[Any] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : int = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCAmelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ : Optional[int] = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : int = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
lowerCAmelCase__ : List[Any] = ['ClapFeatureExtractor']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
lowerCAmelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | """simple docstring"""
import os
import numpy
import onnx
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = a.name
UpperCAmelCase__ = b.name
UpperCAmelCase__ = ''
UpperCAmelCase__ = ''
UpperCAmelCase__ = a == b
UpperCAmelCase__ = name_a
UpperCAmelCase__ = name_b
return res
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(lowerCamelCase , lowerCamelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase , lowerCamelCase )
_graph_replace_input_with(node_proto.attribute[1].g , lowerCamelCase , lowerCamelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for n in graph_proto.node:
_node_replace_input_with(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = list(model.graph.initializer )
UpperCAmelCase__ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
UpperCAmelCase__ = inits[i].name
UpperCAmelCase__ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = os.path.dirname(lowerCamelCase )
UpperCAmelCase__ = os.path.basename(lowerCamelCase )
UpperCAmelCase__ = onnx.load(os.path.join(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = list(model.graph.initializer )
UpperCAmelCase__ = set()
UpperCAmelCase__ = {}
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
for i in range(len(lowerCamelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(lowerCamelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(lowerCamelCase )
dup_set.add(lowerCamelCase )
UpperCAmelCase__ = inits[j].data_type
UpperCAmelCase__ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 1_1:
mem_size *= 8
else:
print('unexpected data type: ' , lowerCamelCase )
total_reduced_size += mem_size
UpperCAmelCase__ = inits[i].name
UpperCAmelCase__ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowerCamelCase )
else:
UpperCAmelCase__ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' )
UpperCAmelCase__ = sorted(lowerCamelCase )
_remove_dup_initializers_from_model(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = 'optimized_' + model_file_name
UpperCAmelCase__ = os.path.join(lowerCamelCase , lowerCamelCase )
onnx.save(lowerCamelCase , lowerCamelCase )
return new_model
| 98 | 1 |
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] ,lowerCamelCase__ : int="</s>" ,lowerCamelCase__ : str="<unk>" ,lowerCamelCase__ : Union[str, Any]="<pad>" ,lowerCamelCase__ : int=125 ,lowerCamelCase__ : str=None ,**lowerCamelCase__ : Union[str, Any] ,):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase__ = [f'''<extra_id_{i}>''' for i in range(lowerCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCAmelCase__ = len(set(filter(lambda lowerCamelCase__ : bool('extra_id' in str(lowerCamelCase__ ) ) ,lowerCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
' extra_ids tokens' )
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else pad_token
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else eos_token
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else unk_token
super().__init__(
eos_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,extra_ids=lowerCamelCase__ ,additional_special_tokens=lowerCamelCase__ ,**lowerCamelCase__ ,)
UpperCAmelCase__ = extra_ids
UpperCAmelCase__ = 2**8 # utf is 8 bits
# define special tokens dict
UpperCAmelCase__ = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
UpperCAmelCase__ = len(self.special_tokens_encoder )
UpperCAmelCase__ = len(lowerCamelCase__ )
for i, token in enumerate(lowerCamelCase__ ):
UpperCAmelCase__ = self.vocab_size + i - n
UpperCAmelCase__ = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ,lowerCamelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ ,token_ids_a=lowerCamelCase__ ,already_has_special_tokens=lowerCamelCase__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowerCamelCase__ )) + [1]
return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[int] ):
if len(lowerCamelCase__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
UpperCAmelCase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
UpperCAmelCase__ = self._add_eos_if_not_present(lowerCamelCase__ )
if token_ids_a is None:
return token_ids_a
else:
UpperCAmelCase__ = self._add_eos_if_not_present(lowerCamelCase__ )
return token_ids_a + token_ids_a
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : str ):
UpperCAmelCase__ = [chr(lowerCamelCase__ ) for i in text.encode('utf-8' )]
return tokens
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : str ):
if token in self.special_tokens_encoder:
UpperCAmelCase__ = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
UpperCAmelCase__ = self.added_tokens_encoder[token]
elif len(lowerCamelCase__ ) != 1:
UpperCAmelCase__ = self.unk_token_id
else:
UpperCAmelCase__ = ord(lowerCamelCase__ ) + self._num_special_tokens
return token_id
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Any ):
if index in self.special_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[index]
else:
UpperCAmelCase__ = chr(index - self._num_special_tokens )
return token
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
UpperCAmelCase__ = b''
for token in tokens:
if token in self.special_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.added_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.special_tokens_encoder:
UpperCAmelCase__ = token.encode('utf-8' )
elif token in self.added_tokens_encoder:
UpperCAmelCase__ = token.encode('utf-8' )
else:
UpperCAmelCase__ = bytes([ord(lowerCamelCase__ )] )
bstring += tok_string
UpperCAmelCase__ = bstring.decode('utf-8' ,errors='ignore' )
return string
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ):
return ()
| 98 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = ShapEImgaImgPipeline
snake_case__ = ["image"]
snake_case__ = ["image"]
snake_case__ = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
snake_case__ = False
@property
def __lowerCAmelCase ( self : List[str] ):
return 32
@property
def __lowerCAmelCase ( self : str ):
return 32
@property
def __lowerCAmelCase ( self : int ):
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self : List[Any] ):
return 8
@property
def __lowerCAmelCase ( self : Optional[int] ):
torch.manual_seed(0 )
UpperCAmelCase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,)
UpperCAmelCase__ = CLIPVisionModel(lowerCamelCase__ )
return model
@property
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = CLIPImageProcessor(
crop_size=224 ,do_center_crop=lowerCamelCase__ ,do_normalize=lowerCamelCase__ ,do_resize=lowerCamelCase__ ,image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] ,image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] ,resample=3 ,size=224 ,)
return image_processor
@property
def __lowerCAmelCase ( self : str ):
torch.manual_seed(0 )
UpperCAmelCase__ = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
UpperCAmelCase__ = PriorTransformer(**lowerCamelCase__ )
return model
@property
def __lowerCAmelCase ( self : Tuple ):
torch.manual_seed(0 )
UpperCAmelCase__ = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
UpperCAmelCase__ = ShapERenderer(**lowerCamelCase__ )
return model
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = self.dummy_prior
UpperCAmelCase__ = self.dummy_image_encoder
UpperCAmelCase__ = self.dummy_image_processor
UpperCAmelCase__ = self.dummy_renderer
UpperCAmelCase__ = HeunDiscreteScheduler(
beta_schedule='exp' ,num_train_timesteps=1_024 ,prediction_type='sample' ,use_karras_sigmas=lowerCamelCase__ ,clip_sample=lowerCamelCase__ ,clip_sample_range=1.0 ,)
UpperCAmelCase__ = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : Any ,lowerCamelCase__ : str=0 ):
UpperCAmelCase__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith('mps' ):
UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ )
else:
UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
UpperCAmelCase__ = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = 'cpu'
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
UpperCAmelCase__ = output.images[0]
UpperCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCAmelCase__ = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Tuple ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = torch_device == 'cpu'
UpperCAmelCase__ = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=lowerCamelCase__ ,relax_max_difference=lowerCamelCase__ ,)
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = self.get_dummy_inputs(lowerCamelCase__ )
for key in inputs.keys():
if key in self.batch_params:
UpperCAmelCase__ = batch_size * [inputs[key]]
UpperCAmelCase__ = pipe(**lowerCamelCase__ ,num_images_per_prompt=lowerCamelCase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
UpperCAmelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
UpperCAmelCase__ = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
UpperCAmelCase__ = pipe(
lowerCamelCase__ ,generator=lowerCamelCase__ ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type='np' ,).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCamelCase__ ,lowerCamelCase__ )
| 98 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
lowerCAmelCase__ : Tuple = logging.get_logger(__name__)
lowerCAmelCase__ : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ : Tuple = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
lowerCAmelCase__ : Any = {'allegro/herbert-base-cased': 514}
lowerCAmelCase__ : Optional[Any] = {}
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = HerbertTokenizer
def __init__( self : Tuple ,lowerCamelCase__ : int=None ,lowerCamelCase__ : int=None ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : Union[str, Any]="<s>" ,lowerCamelCase__ : Union[str, Any]="<unk>" ,lowerCamelCase__ : str="<pad>" ,lowerCamelCase__ : List[Any]="<mask>" ,lowerCamelCase__ : List[str]="</s>" ,**lowerCamelCase__ : Optional[int] ,):
super().__init__(
lowerCamelCase__ ,lowerCamelCase__ ,tokenizer_file=lowerCamelCase__ ,cls_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,mask_token=lowerCamelCase__ ,sep_token=lowerCamelCase__ ,**lowerCamelCase__ ,)
def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
UpperCAmelCase__ = [self.cls_token_id]
UpperCAmelCase__ = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ,lowerCamelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ ,token_ids_a=lowerCamelCase__ ,already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ):
UpperCAmelCase__ = self._tokenizer.model.save(lowerCamelCase__ ,name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
| 98 | """simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] ,lowerCamelCase__ : int="</s>" ,lowerCamelCase__ : str="<unk>" ,lowerCamelCase__ : Union[str, Any]="<pad>" ,lowerCamelCase__ : int=125 ,lowerCamelCase__ : str=None ,**lowerCamelCase__ : Union[str, Any] ,):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase__ = [f'''<extra_id_{i}>''' for i in range(lowerCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCAmelCase__ = len(set(filter(lambda lowerCamelCase__ : bool('extra_id' in str(lowerCamelCase__ ) ) ,lowerCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
' extra_ids tokens' )
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else pad_token
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else eos_token
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else unk_token
super().__init__(
eos_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,extra_ids=lowerCamelCase__ ,additional_special_tokens=lowerCamelCase__ ,**lowerCamelCase__ ,)
UpperCAmelCase__ = extra_ids
UpperCAmelCase__ = 2**8 # utf is 8 bits
# define special tokens dict
UpperCAmelCase__ = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
UpperCAmelCase__ = len(self.special_tokens_encoder )
UpperCAmelCase__ = len(lowerCamelCase__ )
for i, token in enumerate(lowerCamelCase__ ):
UpperCAmelCase__ = self.vocab_size + i - n
UpperCAmelCase__ = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ,lowerCamelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ ,token_ids_a=lowerCamelCase__ ,already_has_special_tokens=lowerCamelCase__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowerCamelCase__ )) + [1]
return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[int] ):
if len(lowerCamelCase__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
UpperCAmelCase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
UpperCAmelCase__ = self._add_eos_if_not_present(lowerCamelCase__ )
if token_ids_a is None:
return token_ids_a
else:
UpperCAmelCase__ = self._add_eos_if_not_present(lowerCamelCase__ )
return token_ids_a + token_ids_a
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : str ):
UpperCAmelCase__ = [chr(lowerCamelCase__ ) for i in text.encode('utf-8' )]
return tokens
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : str ):
if token in self.special_tokens_encoder:
UpperCAmelCase__ = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
UpperCAmelCase__ = self.added_tokens_encoder[token]
elif len(lowerCamelCase__ ) != 1:
UpperCAmelCase__ = self.unk_token_id
else:
UpperCAmelCase__ = ord(lowerCamelCase__ ) + self._num_special_tokens
return token_id
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Any ):
if index in self.special_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[index]
else:
UpperCAmelCase__ = chr(index - self._num_special_tokens )
return token
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
UpperCAmelCase__ = b''
for token in tokens:
if token in self.special_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.added_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.special_tokens_encoder:
UpperCAmelCase__ = token.encode('utf-8' )
elif token in self.added_tokens_encoder:
UpperCAmelCase__ = token.encode('utf-8' )
else:
UpperCAmelCase__ = bytes([ord(lowerCamelCase__ )] )
bstring += tok_string
UpperCAmelCase__ = bstring.decode('utf-8' ,errors='ignore' )
return string
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ):
return ()
| 98 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
def a_ ( lowerCamelCase ):
if isinstance(lowerCamelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowerCamelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowerCamelCase ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ["pixel_values"]
def __init__( self : Dict ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Union[int, float] = 1 / 255 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,**lowerCamelCase__ : List[Any] ,):
super().__init__(**lowerCamelCase__ )
UpperCAmelCase__ = size if size is not None else {'shortest_edge': 256}
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
UpperCAmelCase__ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,param_name='crop_size' )
UpperCAmelCase__ = do_resize
UpperCAmelCase__ = size
UpperCAmelCase__ = do_center_crop
UpperCAmelCase__ = crop_size
UpperCAmelCase__ = resample
UpperCAmelCase__ = do_rescale
UpperCAmelCase__ = rescale_factor
UpperCAmelCase__ = offset
UpperCAmelCase__ = do_normalize
UpperCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : List[str] ,):
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
if "shortest_edge" in size:
UpperCAmelCase__ = get_resize_output_image_size(lowerCamelCase__ ,size['shortest_edge'] ,default_to_square=lowerCamelCase__ )
elif "height" in size and "width" in size:
UpperCAmelCase__ = (size['height'], size['width'])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : List[Any] ,):
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(lowerCamelCase__ ,size=(size['height'], size['width']) ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[int, float] ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Union[str, Any] ,):
UpperCAmelCase__ = image.astype(np.floataa )
if offset:
UpperCAmelCase__ = image - (scale / 2)
return rescale(lowerCamelCase__ ,scale=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[float, List[float]] ,lowerCamelCase__ : Union[float, List[float]] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Dict ,):
return normalize(lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : ImageInput ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : float = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST ,):
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
UpperCAmelCase__ = to_numpy_array(lowerCamelCase__ )
if do_resize:
UpperCAmelCase__ = self.resize(image=lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ )
if do_center_crop:
UpperCAmelCase__ = self.center_crop(lowerCamelCase__ ,size=lowerCamelCase__ )
if do_rescale:
UpperCAmelCase__ = self.rescale(image=lowerCamelCase__ ,scale=lowerCamelCase__ ,offset=lowerCamelCase__ )
if do_normalize:
UpperCAmelCase__ = self.normalize(image=lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ )
UpperCAmelCase__ = to_channel_dimension_format(lowerCamelCase__ ,lowerCamelCase__ )
return image
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : ImageInput ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : float = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[float, List[float]]] = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,lowerCamelCase__ : ChannelDimension = ChannelDimension.FIRST ,**lowerCamelCase__ : Dict ,):
UpperCAmelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ = resample if resample is not None else self.resample
UpperCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ = offset if offset is not None else self.offset
UpperCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ = image_std if image_std is not None else self.image_std
UpperCAmelCase__ = size if size is not None else self.size
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
UpperCAmelCase__ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ ,param_name='crop_size' )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
UpperCAmelCase__ = make_batched(lowerCamelCase__ )
UpperCAmelCase__ = [
[
self._preprocess_image(
image=lowerCamelCase__ ,do_resize=lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ,do_center_crop=lowerCamelCase__ ,crop_size=lowerCamelCase__ ,do_rescale=lowerCamelCase__ ,rescale_factor=lowerCamelCase__ ,offset=lowerCamelCase__ ,do_normalize=lowerCamelCase__ ,image_mean=lowerCamelCase__ ,image_std=lowerCamelCase__ ,data_format=lowerCamelCase__ ,)
for img in video
]
for video in videos
]
UpperCAmelCase__ = {'pixel_values': videos}
return BatchFeature(data=lowerCamelCase__ ,tensor_type=lowerCamelCase__ )
| 98 | """simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class snake_case :
"""simple docstring"""
snake_case__ = 42
snake_case__ = None
snake_case__ = None
lowerCAmelCase__ : Union[str, Any] = namedtuple('CoinsDistribResult', 'moves excess')
def a_ ( lowerCamelCase ):
if root is None:
return 0
# Validation
def count_nodes(lowerCamelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(lowerCamelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(lowerCamelCase ) != count_coins(lowerCamelCase ):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
def get_distrib(lowerCamelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
UpperCAmelCase__ , UpperCAmelCase__ = get_distrib(node.left )
UpperCAmelCase__ , UpperCAmelCase__ = get_distrib(node.right )
UpperCAmelCase__ = 1 - left_distrib_excess
UpperCAmelCase__ = 1 - right_distrib_excess
UpperCAmelCase__ = (
left_distrib_moves
+ right_distrib_moves
+ abs(lowerCamelCase )
+ abs(lowerCamelCase )
)
UpperCAmelCase__ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(lowerCamelCase , lowerCamelCase )
return get_distrib(lowerCamelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = to_pil_image(lowerCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ = pil_image.size
UpperCAmelCase__ = pytesseract.image_to_data(lowerCamelCase , lang=lowerCamelCase , output_type='dict' , config=lowerCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = data['text'], data['left'], data['top'], data['width'], data['height']
# filter empty words and corresponding coordinates
UpperCAmelCase__ = [idx for idx, word in enumerate(lowerCamelCase ) if not word.strip()]
UpperCAmelCase__ = [word for idx, word in enumerate(lowerCamelCase ) if idx not in irrelevant_indices]
UpperCAmelCase__ = [coord for idx, coord in enumerate(lowerCamelCase ) if idx not in irrelevant_indices]
UpperCAmelCase__ = [coord for idx, coord in enumerate(lowerCamelCase ) if idx not in irrelevant_indices]
UpperCAmelCase__ = [coord for idx, coord in enumerate(lowerCamelCase ) if idx not in irrelevant_indices]
UpperCAmelCase__ = [coord for idx, coord in enumerate(lowerCamelCase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
UpperCAmelCase__ = []
for x, y, w, h in zip(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = [x, y, x + w, y + h]
actual_boxes.append(lowerCamelCase )
# finally, normalize the bounding boxes
UpperCAmelCase__ = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
assert len(lowerCamelCase ) == len(lowerCamelCase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ["pixel_values"]
def __init__( self : int ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : float = 1 / 255 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Union[float, Iterable[float]] = None ,lowerCamelCase__ : Union[float, Iterable[float]] = None ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[str] = None ,lowerCamelCase__ : Optional[str] = "" ,**lowerCamelCase__ : Any ,):
super().__init__(**lowerCamelCase__ )
UpperCAmelCase__ = size if size is not None else {'height': 224, 'width': 224}
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ )
UpperCAmelCase__ = do_resize
UpperCAmelCase__ = size
UpperCAmelCase__ = resample
UpperCAmelCase__ = do_rescale
UpperCAmelCase__ = rescale_value
UpperCAmelCase__ = do_normalize
UpperCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
UpperCAmelCase__ = apply_ocr
UpperCAmelCase__ = ocr_lang
UpperCAmelCase__ = tesseract_config
def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Dict[str, int] ,lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : str ,):
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
UpperCAmelCase__ = (size['height'], size['width'])
return resize(lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[int, float] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Dict ,):
return rescale(lowerCamelCase__ ,scale=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : Union[float, Iterable[float]] ,lowerCamelCase__ : Union[float, Iterable[float]] ,lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ,**lowerCamelCase__ : Optional[int] ,):
return normalize(lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : ImageInput ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Dict[str, int] = None ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : float = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Union[float, Iterable[float]] = None ,lowerCamelCase__ : Union[float, Iterable[float]] = None ,lowerCamelCase__ : bool = None ,lowerCamelCase__ : Optional[str] = None ,lowerCamelCase__ : Optional[str] = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,lowerCamelCase__ : ChannelDimension = ChannelDimension.FIRST ,**lowerCamelCase__ : str ,):
UpperCAmelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ = size if size is not None else self.size
UpperCAmelCase__ = get_size_dict(lowerCamelCase__ )
UpperCAmelCase__ = resample if resample is not None else self.resample
UpperCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ = image_std if image_std is not None else self.image_std
UpperCAmelCase__ = apply_ocr if apply_ocr is not None else self.apply_ocr
UpperCAmelCase__ = ocr_lang if ocr_lang is not None else self.ocr_lang
UpperCAmelCase__ = tesseract_config if tesseract_config is not None else self.tesseract_config
UpperCAmelCase__ = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )
# All transformations expect numpy arrays.
UpperCAmelCase__ = [to_numpy_array(lowerCamelCase__ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self ,'pytesseract' )
UpperCAmelCase__ = []
UpperCAmelCase__ = []
for image in images:
UpperCAmelCase__ , UpperCAmelCase__ = apply_tesseract(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
words_batch.append(lowerCamelCase__ )
boxes_batch.append(lowerCamelCase__ )
if do_resize:
UpperCAmelCase__ = [self.resize(image=lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ) for image in images]
if do_rescale:
UpperCAmelCase__ = [self.rescale(image=lowerCamelCase__ ,scale=lowerCamelCase__ ) for image in images]
if do_normalize:
UpperCAmelCase__ = [self.normalize(image=lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ) for image in images]
UpperCAmelCase__ = [to_channel_dimension_format(lowerCamelCase__ ,lowerCamelCase__ ) for image in images]
UpperCAmelCase__ = BatchFeature(data={'pixel_values': images} ,tensor_type=lowerCamelCase__ )
if apply_ocr:
UpperCAmelCase__ = words_batch
UpperCAmelCase__ = boxes_batch
return data
| 98 | """simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = (PNDMScheduler,)
snake_case__ = (("num_inference_steps", 50),)
def __lowerCAmelCase ( self : List[str] ,**lowerCamelCase__ : str ):
UpperCAmelCase__ = {
'num_train_timesteps': 1_000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
}
config.update(**lowerCamelCase__ )
return config
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Optional[Any]=0 ,**lowerCamelCase__ : List[str] ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : Tuple ):
pass
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str]=0 ,**lowerCamelCase__ : Tuple ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : List[Any] ,**lowerCamelCase__ : int ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = 10
UpperCAmelCase__ = self.dummy_model()
UpperCAmelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
return sample
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase__ ,'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase__ ,'set_timesteps' ):
UpperCAmelCase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCAmelCase ( self : List[Any] ):
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[int] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCamelCase__ )
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps ,torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,)
def __lowerCAmelCase ( self : Dict ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowerCamelCase__ ,beta_end=lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase__ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
def __lowerCAmelCase ( self : int ):
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.full_loop()
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.full_loop(prediction_type='v_prediction' )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def __lowerCAmelCase ( self : Union[str, Any] ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
| 98 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = MvpTokenizer
snake_case__ = MvpTokenizerFast
snake_case__ = True
snake_case__ = filter_roberta_detectors
def __lowerCAmelCase ( self : Dict ):
super().setUp()
UpperCAmelCase__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
UpperCAmelCase__ = dict(zip(lowerCamelCase__ ,range(len(lowerCamelCase__ ) ) ) )
UpperCAmelCase__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
UpperCAmelCase__ = {'unk_token': '<unk>'}
UpperCAmelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCamelCase__ ) )
def __lowerCAmelCase ( self : Union[str, Any] ,**lowerCamelCase__ : List[Any] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : str ,**lowerCamelCase__ : Optional[Any] ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[Any] ):
return "lower newer", "lower newer"
@cached_property
def __lowerCAmelCase ( self : Optional[int] ):
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
def __lowerCAmelCase ( self : Dict ):
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def __lowerCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
UpperCAmelCase__ = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase__ = tokenizer(lowerCamelCase__ ,max_length=len(lowerCamelCase__ ) ,padding=lowerCamelCase__ ,return_tensors='pt' )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
UpperCAmelCase__ = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
# Test that special tokens are reset
@require_torch
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase__ = tokenizer(lowerCamelCase__ ,padding=lowerCamelCase__ ,return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids' ,lowerCamelCase__ )
self.assertIn('attention_mask' ,lowerCamelCase__ )
self.assertNotIn('labels' ,lowerCamelCase__ )
self.assertNotIn('decoder_attention_mask' ,lowerCamelCase__ )
@require_torch
def __lowerCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase__ = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase__ = tokenizer(text_target=lowerCamelCase__ ,max_length=32 ,padding='max_length' ,return_tensors='pt' )
self.assertEqual(32 ,targets['input_ids'].shape[1] )
@require_torch
def __lowerCAmelCase ( self : List[str] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase__ = tokenizer(
['I am a small frog' * 1_024, 'I am a small frog'] ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,return_tensors='pt' )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual(batch.input_ids.shape ,(2, 1_024) )
@require_torch
def __lowerCAmelCase ( self : Dict ):
UpperCAmelCase__ = ['A long paragraph for summarization.']
UpperCAmelCase__ = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase__ = tokenizer(lowerCamelCase__ ,text_target=lowerCamelCase__ ,return_tensors='pt' )
UpperCAmelCase__ = inputs['input_ids']
UpperCAmelCase__ = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def __lowerCAmelCase ( self : Any ):
pass
def __lowerCAmelCase ( self : Tuple ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
UpperCAmelCase__ = self.tokenizer_class.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
UpperCAmelCase__ = 'A, <mask> AllenNLP sentence.'
UpperCAmelCase__ = tokenizer_r.encode_plus(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,return_token_type_ids=lowerCamelCase__ )
UpperCAmelCase__ = tokenizer_p.encode_plus(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,return_token_type_ids=lowerCamelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) ,sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) ,sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) ,)
UpperCAmelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
UpperCAmelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] ,[0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] ,[0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowerCamelCase__ ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowerCamelCase__ ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 98 | """simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase__ : Optional[Any] = logging.getLogger()
def a_ ( ):
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('-f' )
UpperCAmelCase__ = parser.parse_args()
return args.f
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
UpperCAmelCase__ = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 ,'run_glue_deebert.py' )
with patch.object(lowerCamelCase__ ,'argv' ,lowerCamelCase__ ):
UpperCAmelCase__ = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCamelCase__ ,0.6_6_6 )
@slow
@require_torch_non_multi_gpu
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
self.run_and_check(lowerCamelCase__ )
UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCamelCase__ )
UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCamelCase__ )
| 98 | 1 |
"""simple docstring"""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = SMALL_MODEL_IDENTIFIER
UpperCAmelCase__ = 'pt'
UpperCAmelCase__ = 'tf'
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : int ):
UpperCAmelCase__ = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(lowerCamelCase__ )
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : Any ):
UpperCAmelCase__ = TFAutoModel.from_pretrained(self.test_model ,from_pt=lowerCamelCase__ )
model_tf.save_pretrained(lowerCamelCase__ )
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = 'mock_framework'
# Framework provided - return whatever the user provides
UpperCAmelCase__ = FeaturesManager.determine_framework(self.test_model ,lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowerCamelCase__ )
UpperCAmelCase__ = FeaturesManager.determine_framework(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowerCamelCase__ )
UpperCAmelCase__ = FeaturesManager.determine_framework(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowerCamelCase__ )
UpperCAmelCase__ = FeaturesManager.determine_framework(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowerCamelCase__ )
UpperCAmelCase__ = FeaturesManager.determine_framework(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__ = FeaturesManager.determine_framework(lowerCamelCase__ )
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = MagicMock(return_value=lowerCamelCase__ )
with patch('transformers.onnx.features.is_tf_available' ,lowerCamelCase__ ):
UpperCAmelCase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCamelCase__ ,self.framework_pt )
# PyTorch not in environment -> use TensorFlow
UpperCAmelCase__ = MagicMock(return_value=lowerCamelCase__ )
with patch('transformers.onnx.features.is_torch_available' ,lowerCamelCase__ ):
UpperCAmelCase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCamelCase__ ,self.framework_tf )
# Both in environment -> use PyTorch
UpperCAmelCase__ = MagicMock(return_value=lowerCamelCase__ )
UpperCAmelCase__ = MagicMock(return_value=lowerCamelCase__ )
with patch('transformers.onnx.features.is_tf_available' ,lowerCamelCase__ ), patch(
'transformers.onnx.features.is_torch_available' ,lowerCamelCase__ ):
UpperCAmelCase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowerCamelCase__ ,self.framework_pt )
# Both not in environment -> raise error
UpperCAmelCase__ = MagicMock(return_value=lowerCamelCase__ )
UpperCAmelCase__ = MagicMock(return_value=lowerCamelCase__ )
with patch('transformers.onnx.features.is_tf_available' ,lowerCamelCase__ ), patch(
'transformers.onnx.features.is_torch_available' ,lowerCamelCase__ ):
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__ = FeaturesManager.determine_framework(self.test_model )
| 98 | """simple docstring"""
import argparse
lowerCAmelCase__ : List[str] = 'docs/source/_static/js/custom.js'
def a_ ( lowerCamelCase ):
with open(lowerCamelCase , encoding='utf-8' , newline='\n' ) as f:
UpperCAmelCase__ = f.readlines()
UpperCAmelCase__ = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
UpperCAmelCase__ = f'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += f''' "v{version}": "v{version}",\n'''
with open(lowerCamelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--version', help='Release version.')
lowerCAmelCase__ : Optional[int] = parser.parse_args()
update_custom_js(args.version)
| 98 | 1 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected' , [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 1_0, 'max_num_jobs': 1}, [range(1_0 )]),
({'num_shards': 1_0, 'max_num_jobs': 1_0}, [range(lowerCamelCase , i + 1 ) for i in range(1_0 )]),
({'num_shards': 1, 'max_num_jobs': 1_0}, [range(1 )]),
({'num_shards': 1_0, 'max_num_jobs': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 1_0 )]),
({'num_shards': 3, 'max_num_jobs': 1_0}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = _distribute_shards(**lowerCamelCase )
assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected' , [
({'foo': 0}, 1_0, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
] , )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = _split_gen_kwargs(lowerCamelCase , lowerCamelCase )
assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected' , [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
] , )
def a_ ( lowerCamelCase , lowerCamelCase ):
if expected is RuntimeError:
with pytest.raises(lowerCamelCase ):
_number_of_shards_in_gen_kwargs(lowerCamelCase )
else:
UpperCAmelCase__ = _number_of_shards_in_gen_kwargs(lowerCamelCase )
assert out == expected
| 98 | """simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def a_ ( lowerCamelCase ):
return np.dot(lowerCamelCase , lowerCamelCase )
class snake_case :
"""simple docstring"""
def __init__( self : int ,*,
lowerCamelCase__ : float = np.inf ,lowerCamelCase__ : str = "linear" ,lowerCamelCase__ : float = 0.0 ,):
UpperCAmelCase__ = regularization
UpperCAmelCase__ = gamma
if kernel == "linear":
UpperCAmelCase__ = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma' )
if not isinstance(self.gamma ,(float, int) ):
raise ValueError('gamma must be float or int' )
if not self.gamma > 0:
raise ValueError('gamma must be > 0' )
UpperCAmelCase__ = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
UpperCAmelCase__ = f'''Unknown kernel: {kernel}'''
raise ValueError(lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : ndarray ,lowerCamelCase__ : ndarray ):
return np.dot(lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : ndarray ,lowerCamelCase__ : ndarray ):
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : list[ndarray] ,lowerCamelCase__ : ndarray ):
UpperCAmelCase__ = observations
UpperCAmelCase__ = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((UpperCAmelCase__) , ) = np.shape(lowerCamelCase__ )
def to_minimize(lowerCamelCase__ : ndarray ) -> float:
UpperCAmelCase__ = 0
((UpperCAmelCase__) , ) = np.shape(lowerCamelCase__ )
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] ,observations[j] )
)
return 1 / 2 * s - sum(lowerCamelCase__ )
UpperCAmelCase__ = LinearConstraint(lowerCamelCase__ ,0 ,0 )
UpperCAmelCase__ = Bounds(0 ,self.regularization )
UpperCAmelCase__ = minimize(
lowerCamelCase__ ,np.ones(lowerCamelCase__ ) ,bounds=lowerCamelCase__ ,constraints=[ly_contraint] ).x
UpperCAmelCase__ = l_star
# calculating mean offset of separation plane to points
UpperCAmelCase__ = 0
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] ,observations[j] )
UpperCAmelCase__ = s / n
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : ndarray ):
UpperCAmelCase__ = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] ,lowerCamelCase__ )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
import random
from typing import Any
def a_ ( lowerCamelCase ):
for _ in range(len(lowerCamelCase ) ):
UpperCAmelCase__ = random.randint(0 , len(lowerCamelCase ) - 1 )
UpperCAmelCase__ = random.randint(0 , len(lowerCamelCase ) - 1 )
UpperCAmelCase__ , UpperCAmelCase__ = data[b], data[a]
return data
if __name__ == "__main__":
lowerCAmelCase__ : Dict = [0, 1, 2, 3, 4, 5, 6, 7]
lowerCAmelCase__ : str = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 98 | """simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCAmelCase__ : List[Any] = '\\n\n'
lowerCAmelCase__ : Tuple = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
lowerCAmelCase__ : str = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def __lowerCAmelCase ( self : str ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) ,reference_urls=['https://huggingface.co/docs/transformers/perplexity'] ,)
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : List[str]=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
UpperCAmelCase__ = 'cuda'
else:
UpperCAmelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = model.to(lowerCamelCase__ )
UpperCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase__ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(lowerCamelCase__ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
UpperCAmelCase__ = model.config.max_length - 1
else:
UpperCAmelCase__ = model.config.max_length
UpperCAmelCase__ = tokenizer(
lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors='pt' ,return_attention_mask=lowerCamelCase__ ,).to(lowerCamelCase__ )
UpperCAmelCase__ = encodings['input_ids']
UpperCAmelCase__ = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) ,1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) ,2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
UpperCAmelCase__ = []
UpperCAmelCase__ = CrossEntropyLoss(reduction='none' )
for start_index in logging.tqdm(range(0 ,len(lowerCamelCase__ ) ,lowerCamelCase__ ) ):
UpperCAmelCase__ = min(start_index + batch_size ,len(lowerCamelCase__ ) )
UpperCAmelCase__ = encoded_texts[start_index:end_index]
UpperCAmelCase__ = attn_masks[start_index:end_index]
if add_start_token:
UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowerCamelCase__ )
UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] ,dim=1 )
UpperCAmelCase__ = torch.cat(
[torch.ones(bos_tokens_tensor.size() ,dtype=torch.intaa ).to(lowerCamelCase__ ), attn_mask] ,dim=1 )
UpperCAmelCase__ = encoded_batch
with torch.no_grad():
UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ).logits
UpperCAmelCase__ = out_logits[..., :-1, :].contiguous()
UpperCAmelCase__ = labels[..., 1:].contiguous()
UpperCAmelCase__ = attn_mask[..., 1:].contiguous()
UpperCAmelCase__ = torch.expa(
(loss_fct(shift_logits.transpose(1 ,2 ) ,lowerCamelCase__ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(lowerCamelCase__ )}
| 98 | 1 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ""
snake_case__ = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self : Optional[int] ,lowerCamelCase__ : Optional[DatasetInfo] = None ,lowerCamelCase__ : Optional[str] = None ,**lowerCamelCase__ : Optional[int] ,):
super().__init__(self ,**lowerCamelCase__ )
UpperCAmelCase__ = repo_info
UpperCAmelCase__ = token
UpperCAmelCase__ = None
def __lowerCAmelCase ( self : int ):
if self.dir_cache is None:
UpperCAmelCase__ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(lowerCamelCase__ ): {'name': str(lowerCamelCase__ ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : str ,lowerCamelCase__ : str = "rb" ,**lowerCamelCase__ : str ,):
if not isinstance(self.repo_info ,lowerCamelCase__ ):
raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
UpperCAmelCase__ = hf_hub_url(self.repo_info.id ,lowerCamelCase__ ,revision=self.repo_info.sha )
return fsspec.open(
lowerCamelCase__ ,mode=lowerCamelCase__ ,headers=get_authentication_headers_for_url(lowerCamelCase__ ,use_auth_token=self.token ) ,client_kwargs={'trust_env': True} ,).open()
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : Any ,**lowerCamelCase__ : Union[str, Any] ):
self._get_dirs()
UpperCAmelCase__ = self._strip_protocol(lowerCamelCase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[int]=False ,**lowerCamelCase__ : List[str] ):
self._get_dirs()
UpperCAmelCase__ = PurePosixPath(path.strip('/' ) )
UpperCAmelCase__ = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ = PurePosixPath(p.strip('/' ) )
UpperCAmelCase__ = p.parent
if root == path:
UpperCAmelCase__ = f
UpperCAmelCase__ = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
| 98 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : int = logging.get_logger(__name__)
lowerCAmelCase__ : str = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "xglm"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : Any ,lowerCamelCase__ : Any=256_008 ,lowerCamelCase__ : Optional[Any]=2_048 ,lowerCamelCase__ : List[str]=1_024 ,lowerCamelCase__ : List[str]=4_096 ,lowerCamelCase__ : Tuple=24 ,lowerCamelCase__ : Optional[int]=16 ,lowerCamelCase__ : int="gelu" ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : List[Any]=0.0 ,lowerCamelCase__ : List[str]=0.0 ,lowerCamelCase__ : Optional[Any]=0.0_2 ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=2 ,lowerCamelCase__ : Dict=1 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Tuple=2 ,**lowerCamelCase__ : List[Any] ,):
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = d_model
UpperCAmelCase__ = ffn_dim
UpperCAmelCase__ = num_layers
UpperCAmelCase__ = attention_heads
UpperCAmelCase__ = activation_function
UpperCAmelCase__ = dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = layerdrop
UpperCAmelCase__ = init_std
UpperCAmelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase__ = use_cache
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,decoder_start_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
| 98 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = PhobertTokenizer
snake_case__ = False
def __lowerCAmelCase ( self : str ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase__ = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@']
UpperCAmelCase__ = dict(zip(lowerCamelCase__ ,range(len(lowerCamelCase__ ) ) ) )
UpperCAmelCase__ = ['#version: 0.2', 'l à</w>']
UpperCAmelCase__ = {'unk_token': '<unk>'}
UpperCAmelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase__ = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCamelCase__ ) )
def __lowerCAmelCase ( self : List[Any] ,**lowerCamelCase__ : List[Any] ):
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : Union[str, Any] ):
UpperCAmelCase__ = 'Tôi là VinAI Research'
UpperCAmelCase__ = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'
return input_text, output_text
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = PhobertTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
UpperCAmelCase__ = 'Tôi là VinAI Research'
UpperCAmelCase__ = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split()
UpperCAmelCase__ = tokenizer.tokenize(lowerCamelCase__ )
print(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = tokens + [tokenizer.unk_token]
UpperCAmelCase__ = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,lowerCamelCase__ )
| 98 | """simple docstring"""
import math
def a_ ( lowerCamelCase , lowerCamelCase ):
if (
not isinstance(lowerCamelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * power_factor
def a_ ( lowerCamelCase , lowerCamelCase ):
if (
not isinstance(lowerCamelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase__ : Any = logging.get_logger(__name__)
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ["input_features"]
def __init__( self : Optional[int] ,lowerCamelCase__ : Any=80 ,lowerCamelCase__ : Optional[Any]=16_000 ,lowerCamelCase__ : List[Any]=160 ,lowerCamelCase__ : int=30 ,lowerCamelCase__ : int=400 ,lowerCamelCase__ : Any=0.0 ,lowerCamelCase__ : Union[str, Any]=False ,**lowerCamelCase__ : Optional[Any] ,):
super().__init__(
feature_size=lowerCamelCase__ ,sampling_rate=lowerCamelCase__ ,padding_value=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,**lowerCamelCase__ ,)
UpperCAmelCase__ = n_fft
UpperCAmelCase__ = hop_length
UpperCAmelCase__ = chunk_length
UpperCAmelCase__ = chunk_length * sampling_rate
UpperCAmelCase__ = self.n_samples // hop_length
UpperCAmelCase__ = sampling_rate
UpperCAmelCase__ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 ,num_mel_filters=lowerCamelCase__ ,min_frequency=0.0 ,max_frequency=8_0_0_0.0 ,sampling_rate=lowerCamelCase__ ,norm='slaney' ,mel_scale='slaney' ,)
def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : np.array ):
UpperCAmelCase__ = spectrogram(
lowerCamelCase__ ,window_function(self.n_fft ,'hann' ) ,frame_length=self.n_fft ,hop_length=self.hop_length ,power=2.0 ,mel_filters=self.mel_filters ,log_mel='log10' ,)
UpperCAmelCase__ = log_spec[:, :-1]
UpperCAmelCase__ = np.maximum(lowerCamelCase__ ,log_spec.max() - 8.0 )
UpperCAmelCase__ = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __lowerCAmelCase ( lowerCamelCase__ : List[np.ndarray] ,lowerCamelCase__ : List[np.ndarray] ,lowerCamelCase__ : float = 0.0 ):
if attention_mask is not None:
UpperCAmelCase__ = np.array(lowerCamelCase__ ,np.intaa )
UpperCAmelCase__ = []
for vector, length in zip(lowerCamelCase__ ,attention_mask.sum(-1 ) ):
UpperCAmelCase__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
UpperCAmelCase__ = padding_value
normed_input_values.append(lowerCamelCase__ )
else:
UpperCAmelCase__ = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self : int ,lowerCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[str] = "max_length" ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,**lowerCamelCase__ : Dict ,):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
UpperCAmelCase__ = isinstance(lowerCamelCase__ ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
UpperCAmelCase__ = is_batched_numpy or (
isinstance(lowerCamelCase__ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase__ = [np.asarray([speech] ,dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase__ ,np.ndarray ):
UpperCAmelCase__ = np.asarray(lowerCamelCase__ ,dtype=np.floataa )
elif isinstance(lowerCamelCase__ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase__ = [np.asarray([raw_speech] ).T]
UpperCAmelCase__ = BatchFeature({'input_features': raw_speech} )
# convert into correct format for padding
UpperCAmelCase__ = self.pad(
lowerCamelCase__ ,padding=lowerCamelCase__ ,max_length=max_length if max_length else self.n_samples ,truncation=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,return_attention_mask=return_attention_mask or do_normalize ,)
# zero-mean and unit-variance normalization
if do_normalize:
UpperCAmelCase__ = self.zero_mean_unit_var_norm(
padded_inputs['input_features'] ,attention_mask=padded_inputs['attention_mask'] ,padding_value=self.padding_value ,)
UpperCAmelCase__ = np.stack(padded_inputs['input_features'] ,axis=0 )
# make sure list is in array format
UpperCAmelCase__ = padded_inputs.get('input_features' ).transpose(2 ,0 ,1 )
UpperCAmelCase__ = [self._np_extract_fbank_features(lowerCamelCase__ ) for waveform in input_features[0]]
if isinstance(input_features[0] ,lowerCamelCase__ ):
UpperCAmelCase__ = [np.asarray(lowerCamelCase__ ,dtype=np.floataa ) for feature in input_features]
else:
UpperCAmelCase__ = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
UpperCAmelCase__ = padded_inputs['attention_mask'][:, :: self.hop_length]
if return_tensors is not None:
UpperCAmelCase__ = padded_inputs.convert_to_tensors(lowerCamelCase__ )
return padded_inputs
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = copy.deepcopy(self.__dict__ )
UpperCAmelCase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 98 | """simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
lowerCAmelCase__ : Optional[int] = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
lowerCAmelCase__ : str = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
lowerCAmelCase__ : int = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
lowerCAmelCase__ : Dict = F"""down_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : Union[str, Any] = F"""input_blocks.{3*i + j + 1}.0."""
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
lowerCAmelCase__ : Tuple = F"""down_blocks.{i}.attentions.{j}."""
lowerCAmelCase__ : int = F"""input_blocks.{3*i + j + 1}.1."""
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
lowerCAmelCase__ : Tuple = F"""up_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : int = F"""output_blocks.{3*i + j}.0."""
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
lowerCAmelCase__ : Any = F"""up_blocks.{i}.attentions.{j}."""
lowerCAmelCase__ : List[str] = F"""output_blocks.{3*i + j}.1."""
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
lowerCAmelCase__ : List[str] = F"""down_blocks.{i}.downsamplers.0.conv."""
lowerCAmelCase__ : Union[str, Any] = F"""input_blocks.{3*(i+1)}.0.op."""
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
lowerCAmelCase__ : Dict = F"""up_blocks.{i}.upsamplers.0."""
lowerCAmelCase__ : List[Any] = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
lowerCAmelCase__ : str = 'mid_block.attentions.0.'
lowerCAmelCase__ : Union[str, Any] = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
lowerCAmelCase__ : int = F"""mid_block.resnets.{j}."""
lowerCAmelCase__ : List[str] = F"""middle_block.{2*j}."""
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def a_ ( lowerCamelCase ):
# buyer beware: this is a *brittle* function,
# and correct output requires that all of these pieces interact in
# the exact order in which I have arranged them.
UpperCAmelCase__ = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
UpperCAmelCase__ = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
UpperCAmelCase__ = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
lowerCAmelCase__ : str = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
lowerCAmelCase__ : List[str] = F"""encoder.down_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : List[Any] = F"""encoder.down.{i}.block.{j}."""
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
lowerCAmelCase__ : Dict = F"""down_blocks.{i}.downsamplers.0."""
lowerCAmelCase__ : str = F"""down.{i}.downsample."""
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
lowerCAmelCase__ : int = F"""up_blocks.{i}.upsamplers.0."""
lowerCAmelCase__ : str = F"""up.{3-i}.upsample."""
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
lowerCAmelCase__ : Dict = F"""decoder.up_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : Optional[int] = F"""decoder.up.{3-i}.block.{j}."""
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
lowerCAmelCase__ : Any = F"""mid_block.resnets.{i}."""
lowerCAmelCase__ : Any = F"""mid.block_{i+1}."""
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
lowerCAmelCase__ : str = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def a_ ( lowerCamelCase ):
# convert HF linear weights to SD conv2d weights
return w.reshape(*w.shape , 1 , 1 )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
UpperCAmelCase__ = {v: vae_state_dict[k] for k, v in mapping.items()}
UpperCAmelCase__ = ['q', 'k', 'v', 'proj_out']
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f'''mid.attn_1.{weight_name}.weight''' in k:
print(f'''Reshaping {k} for SD format''' )
UpperCAmelCase__ = reshape_weight_for_sd(lowerCamelCase )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
lowerCAmelCase__ : Optional[int] = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
lowerCAmelCase__ : List[Any] = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCAmelCase__ : int = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCAmelCase__ : Optional[int] = {'q': 0, 'k': 1, 'v': 2}
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
for k, v in text_enc_dict.items():
if (
k.endswith('.self_attn.q_proj.weight' )
or k.endswith('.self_attn.k_proj.weight' )
or k.endswith('.self_attn.v_proj.weight' )
):
UpperCAmelCase__ = k[: -len('.q_proj.weight' )]
UpperCAmelCase__ = k[-len('q_proj.weight' )]
if k_pre not in capture_qkv_weight:
UpperCAmelCase__ = [None, None, None]
UpperCAmelCase__ = v
continue
if (
k.endswith('.self_attn.q_proj.bias' )
or k.endswith('.self_attn.k_proj.bias' )
or k.endswith('.self_attn.v_proj.bias' )
):
UpperCAmelCase__ = k[: -len('.q_proj.bias' )]
UpperCAmelCase__ = k[-len('q_proj.bias' )]
if k_pre not in capture_qkv_bias:
UpperCAmelCase__ = [None, None, None]
UpperCAmelCase__ = v
continue
UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
UpperCAmelCase__ = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
UpperCAmelCase__ = torch.cat(lowerCamelCase )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
UpperCAmelCase__ = torch.cat(lowerCamelCase )
return new_state_dict
def a_ ( lowerCamelCase ):
return text_enc_dict
if __name__ == "__main__":
lowerCAmelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
lowerCAmelCase__ : Optional[int] = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCAmelCase__ : Tuple = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
lowerCAmelCase__ : List[str] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
lowerCAmelCase__ : int = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
lowerCAmelCase__ : Union[str, Any] = load_file(unet_path, device='cpu')
else:
lowerCAmelCase__ : str = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
lowerCAmelCase__ : Dict = torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
lowerCAmelCase__ : Optional[Any] = load_file(vae_path, device='cpu')
else:
lowerCAmelCase__ : Optional[int] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
lowerCAmelCase__ : List[str] = torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
lowerCAmelCase__ : Tuple = load_file(text_enc_path, device='cpu')
else:
lowerCAmelCase__ : Any = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
lowerCAmelCase__ : Any = torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
lowerCAmelCase__ : Any = convert_unet_state_dict(unet_state_dict)
lowerCAmelCase__ : Dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCAmelCase__ : List[Any] = convert_vae_state_dict(vae_state_dict)
lowerCAmelCase__ : str = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCAmelCase__ : List[Any] = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCAmelCase__ : Tuple = {'transformer.' + k: v for k, v in text_enc_dict.items()}
lowerCAmelCase__ : List[str] = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCAmelCase__ : str = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
lowerCAmelCase__ : Optional[Any] = convert_text_enc_state_dict(text_enc_dict)
lowerCAmelCase__ : Optional[Any] = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCAmelCase__ : List[Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCAmelCase__ : int = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCAmelCase__ : List[str] = {'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 98 | 1 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowerCAmelCase__ : str = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
lowerCAmelCase__ : List[str] = 10
lowerCAmelCase__ : int = 256
def a_ ( lowerCamelCase ):
if len(lowerCamelCase ) < MIN_NUM_TOKENS:
return None
UpperCAmelCase__ = MinHash(num_perm=lowerCamelCase )
for token in set(lowerCamelCase ):
min_hash.update(token.encode() )
return min_hash
def a_ ( lowerCamelCase ):
return {t for t in NON_ALPHA.split(lowerCamelCase ) if len(t.strip() ) > 0}
class snake_case :
"""simple docstring"""
def __init__( self : Any ,*,
lowerCamelCase__ : float = 0.8_5 ,):
UpperCAmelCase__ = duplication_jaccard_threshold
UpperCAmelCase__ = NUM_PERM
UpperCAmelCase__ = MinHashLSH(threshold=self._duplication_jaccard_threshold ,num_perm=self._num_perm )
UpperCAmelCase__ = defaultdict(lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : MinHash ):
UpperCAmelCase__ = self._index.query(lowerCamelCase__ )
if code_key in self._index.keys:
print(f'''Duplicate key {code_key}''' )
return
self._index.insert(lowerCamelCase__ ,lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowerCamelCase__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = []
for base, duplicates in self._duplicate_clusters.items():
UpperCAmelCase__ = [base] + list(lowerCamelCase__ )
# reformat the cluster to be a list of dict
UpperCAmelCase__ = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
duplicate_clusters.append(lowerCamelCase__ )
return duplicate_clusters
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Optional[int] ):
UpperCAmelCase__ = self.get_duplicate_clusters()
with open(lowerCamelCase__ ,'w' ) as f:
json.dump(lowerCamelCase__ ,lowerCamelCase__ )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ , UpperCAmelCase__ = element
UpperCAmelCase__ = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def a_ ( lowerCamelCase ):
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(lowerCamelCase , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = DuplicationIndex(duplication_jaccard_threshold=lowerCamelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCamelCase ) ) , max_queue_size=1_0_0 ) ):
di.add(lowerCamelCase , lowerCamelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = get_tokens(lowerCamelCase )
UpperCAmelCase__ = get_tokens(lowerCamelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
lowerCAmelCase__ : Any = None
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = []
for elementa in cluster:
UpperCAmelCase__ = _shared_dataset[elementa['base_index']]['content']
for elementa in extremes:
UpperCAmelCase__ = _shared_dataset[elementa['base_index']]['content']
if jaccard_similarity(lowerCamelCase , lowerCamelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
UpperCAmelCase__ = 1
extremes.append(lowerCamelCase )
return extremes
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
global _shared_dataset
UpperCAmelCase__ = dataset
UpperCAmelCase__ = []
UpperCAmelCase__ = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCamelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
lowerCamelCase , lowerCamelCase , ) , total=len(lowerCamelCase ) , ):
extremes_list.append(lowerCamelCase )
return extremes_list
def a_ ( lowerCamelCase , lowerCamelCase = 0.85 ):
UpperCAmelCase__ = make_duplicate_clusters(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
UpperCAmelCase__ = {}
UpperCAmelCase__ = find_extremes(lowerCamelCase , lowerCamelCase , lowerCamelCase )
for extremes in extremes_clusters:
for element in extremes:
UpperCAmelCase__ = element
UpperCAmelCase__ = duplicate_indices - set(extreme_dict.keys() )
UpperCAmelCase__ = dataset.filter(lambda lowerCamelCase , lowerCamelCase : idx not in remove_indices , with_indices=lowerCamelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
UpperCAmelCase__ = element['base_index'] in extreme_dict
if element["is_extreme"]:
UpperCAmelCase__ = extreme_dict[element['base_index']]['copies']
print(f'''Original dataset size: {len(lowerCamelCase )}''' )
print(f'''Number of duplicate clusters: {len(lowerCamelCase )}''' )
print(f'''Files in duplicate cluster: {len(lowerCamelCase )}''' )
print(f'''Unique files in duplicate cluster: {len(lowerCamelCase )}''' )
print(f'''Filtered dataset size: {len(lowerCamelCase )}''' )
return ds_filter, duplicate_clusters
| 98 | """simple docstring"""
def a_ ( lowerCamelCase , lowerCamelCase ):
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
UpperCAmelCase__ = str(bin(lowerCamelCase ) )[2:] # remove the leading "0b"
UpperCAmelCase__ = str(bin(lowerCamelCase ) )[2:] # remove the leading "0b"
UpperCAmelCase__ = max(len(lowerCamelCase ) , len(lowerCamelCase ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(lowerCamelCase ) , b_binary.zfill(lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def a_ ( lowerCamelCase ):
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = create_tensor(lowerCamelCase )
UpperCAmelCase__ = gather(lowerCamelCase )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = [state.process_index]
UpperCAmelCase__ = gather_object(lowerCamelCase )
assert len(lowerCamelCase ) == state.num_processes, f'''{gathered_obj}, {len(lowerCamelCase )} != {state.num_processes}'''
assert gathered_obj == list(range(state.num_processes ) ), f'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = create_tensor(lowerCamelCase )
UpperCAmelCase__ = broadcast(lowerCamelCase )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def a_ ( lowerCamelCase ):
# We need to pad the tensor with one more element if we are the main process
# to ensure that we can pad
if state.is_main_process:
UpperCAmelCase__ = torch.arange(state.num_processes + 1 ).to(state.device )
else:
UpperCAmelCase__ = torch.arange(state.num_processes ).to(state.device )
UpperCAmelCase__ = pad_across_processes(lowerCamelCase )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def a_ ( lowerCamelCase ):
# For now runs on only two processes
if state.num_processes != 2:
return
UpperCAmelCase__ = create_tensor(lowerCamelCase )
UpperCAmelCase__ = reduce(lowerCamelCase , 'sum' )
UpperCAmelCase__ = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(lowerCamelCase , lowerCamelCase ), f'''{reduced_tensor} != {truth_tensor}'''
def a_ ( lowerCamelCase ):
# For now runs on only two processes
if state.num_processes != 2:
return
UpperCAmelCase__ = create_tensor(lowerCamelCase )
UpperCAmelCase__ = reduce(lowerCamelCase , 'mean' )
UpperCAmelCase__ = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(lowerCamelCase , lowerCamelCase ), f'''{reduced_tensor} != {truth_tensor}'''
def a_ ( lowerCamelCase ):
# For xla_spawn (TPUs)
main()
def a_ ( ):
UpperCAmelCase__ = PartialState()
state.print(f'''State: {state}''' )
state.print('testing gather' )
test_gather(lowerCamelCase )
state.print('testing gather_object' )
test_gather_object(lowerCamelCase )
state.print('testing broadcast' )
test_broadcast(lowerCamelCase )
state.print('testing pad_across_processes' )
test_pad_across_processes(lowerCamelCase )
state.print('testing reduce_sum' )
test_reduce_sum(lowerCamelCase )
state.print('testing reduce_mean' )
test_reduce_mean(lowerCamelCase )
if __name__ == "__main__":
main()
| 98 | """simple docstring"""
import requests
from bsa import BeautifulSoup
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = BeautifulSoup(requests.get(lowerCamelCase , params=lowerCamelCase ).content , 'html.parser' )
UpperCAmelCase__ = soup.find('div' , attrs={'class': 'gs_ri'} )
UpperCAmelCase__ = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
return anchors[2].get_text()
if __name__ == "__main__":
lowerCAmelCase__ : Optional[int] = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 98 | 1 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCAmelCase__ : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCAmelCase__ : list[int] = [ord(letter) for letter in string.ascii_lowercase]
lowerCAmelCase__ : set[int] = {ord(char) for char in VALID_CHARS}
lowerCAmelCase__ : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = ""
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
for keychar, cipherchar in zip(cycle(lowerCamelCase ) , lowerCamelCase ):
UpperCAmelCase__ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowerCamelCase )
return decoded
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = []
for key in product(lowerCamelCase , repeat=3 ):
UpperCAmelCase__ = try_key(lowerCamelCase , lowerCamelCase )
if encoded is not None:
possibles.append(lowerCamelCase )
return possibles
def a_ ( lowerCamelCase , lowerCamelCase ):
return [possible for possible in possibles if common_word in possible.lower()]
def a_ ( lowerCamelCase = "p059_cipher.txt" ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = Path(lowerCamelCase ).parent.joinpath(lowerCamelCase ).read_text(encoding='utf-8' )
UpperCAmelCase__ = [int(lowerCamelCase ) for number in data.strip().split(',' )]
UpperCAmelCase__ = filter_valid_chars(lowerCamelCase )
for common_word in COMMON_WORDS:
UpperCAmelCase__ = filter_common_word(lowerCamelCase , lowerCamelCase )
if len(lowerCamelCase ) == 1:
break
UpperCAmelCase__ = possibles[0]
return sum(ord(lowerCamelCase ) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 98 | """simple docstring"""
def a_ ( lowerCamelCase ):
return str(lowerCamelCase ) == str(lowerCamelCase )[::-1]
def a_ ( lowerCamelCase ):
return int(lowerCamelCase ) + int(str(lowerCamelCase )[::-1] )
def a_ ( lowerCamelCase = 1_0_0_0_0 ):
UpperCAmelCase__ = []
for num in range(1 , lowerCamelCase ):
UpperCAmelCase__ = 0
UpperCAmelCase__ = num
while iterations < 5_0:
UpperCAmelCase__ = sum_reverse(lowerCamelCase )
iterations += 1
if is_palindrome(lowerCamelCase ):
break
else:
lychrel_nums.append(lowerCamelCase )
return len(lowerCamelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 98 | 1 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowerCAmelCase__ : Optional[int] = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowerCAmelCase__ : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def a_ ( lowerCamelCase ):
if "://" in dataset_path:
UpperCAmelCase__ = dataset_path.split('://' )[1]
return dataset_path
def a_ ( lowerCamelCase ):
if fs is not None and fs.protocol != "file":
return True
else:
return False
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = not is_remote_filesystem(lowerCamelCase )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowerCamelCase ) , fs._strip_protocol(lowerCamelCase ) )
else:
fs.mv(lowerCamelCase , lowerCamelCase , recursive=lowerCamelCase )
def a_ ( ):
if hasattr(fsspec.asyn , 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = threading.Lock()
| 98 | """simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ : str = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Any = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCAmelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : List[Any] = logging.get_logger(__name__)
lowerCAmelCase__ : Tuple = {
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "nllb-moe"
snake_case__ = ["past_key_values"]
snake_case__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Tuple ,lowerCamelCase__ : Optional[Any]=128_112 ,lowerCamelCase__ : List[str]=1_024 ,lowerCamelCase__ : Any=12 ,lowerCamelCase__ : Any=4_096 ,lowerCamelCase__ : Dict=16 ,lowerCamelCase__ : str=12 ,lowerCamelCase__ : Any=4_096 ,lowerCamelCase__ : str=16 ,lowerCamelCase__ : str=0.0_5 ,lowerCamelCase__ : Dict=0.0_5 ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Dict="relu" ,lowerCamelCase__ : Any=1_024 ,lowerCamelCase__ : List[str]=0.1 ,lowerCamelCase__ : Optional[int]=0.1 ,lowerCamelCase__ : Tuple=0.0 ,lowerCamelCase__ : str=0.0_2 ,lowerCamelCase__ : Any=2 ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=False ,lowerCamelCase__ : Optional[int]="float32" ,lowerCamelCase__ : Dict=False ,lowerCamelCase__ : Dict=128 ,lowerCamelCase__ : int=64 ,lowerCamelCase__ : Tuple=4 ,lowerCamelCase__ : Any=4 ,lowerCamelCase__ : Dict=0.0_0_1 ,lowerCamelCase__ : List[str]=0.0_0_1 ,lowerCamelCase__ : Optional[Any]="all" ,lowerCamelCase__ : List[Any]=False ,lowerCamelCase__ : str=False ,lowerCamelCase__ : Optional[int]=1.0 ,lowerCamelCase__ : Union[str, Any]=0.2 ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : List[Any]=0 ,lowerCamelCase__ : Optional[int]=2 ,lowerCamelCase__ : Optional[Any]=False ,**lowerCamelCase__ : List[Any] ,):
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = d_model
UpperCAmelCase__ = encoder_ffn_dim
UpperCAmelCase__ = encoder_layers
UpperCAmelCase__ = encoder_attention_heads
UpperCAmelCase__ = decoder_ffn_dim
UpperCAmelCase__ = decoder_layers
UpperCAmelCase__ = decoder_attention_heads
UpperCAmelCase__ = dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = activation_function
UpperCAmelCase__ = init_std
UpperCAmelCase__ = encoder_layerdrop
UpperCAmelCase__ = decoder_layerdrop
UpperCAmelCase__ = use_cache
UpperCAmelCase__ = encoder_layers
UpperCAmelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase__ = router_z_loss_coef
UpperCAmelCase__ = router_aux_loss_coef
UpperCAmelCase__ = decoder_sparse_step
UpperCAmelCase__ = encoder_sparse_step
UpperCAmelCase__ = num_experts
UpperCAmelCase__ = expert_capacity
UpperCAmelCase__ = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
UpperCAmelCase__ = router_dtype
UpperCAmelCase__ = router_ignore_padding_tokens
UpperCAmelCase__ = batch_prioritized_routing
UpperCAmelCase__ = second_expert_policy
UpperCAmelCase__ = normalize_router_prob_before_dropping
UpperCAmelCase__ = moe_eval_capacity_token_fraction
UpperCAmelCase__ = moe_token_dropout
UpperCAmelCase__ = output_router_logits
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,is_encoder_decoder=lowerCamelCase__ ,decoder_start_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
| 98 | """simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class snake_case :
"""simple docstring"""
@staticmethod
def __lowerCAmelCase ( *lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Union[str, Any] ):
pass
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = hashlib.mda(image.tobytes() )
return m.hexdigest()[:1_0]
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = np.array(lowerCamelCase )
UpperCAmelCase__ = npimg.shape
return {"hash": hashimage(lowerCamelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
snake_case__ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
snake_case__ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : str ):
UpperCAmelCase__ = MaskGenerationPipeline(model=lowerCamelCase__ ,image_processor=lowerCamelCase__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : str ):
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def __lowerCAmelCase ( self : Optional[Any] ):
pass
@slow
@require_torch
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = pipeline('mask-generation' ,model='facebook/sam-vit-huge' )
UpperCAmelCase__ = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' ,points_per_batch=256 )
# Shortening by hashing
UpperCAmelCase__ = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(lowerCamelCase__ ,decimals=4 ) ,[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] ,)
# fmt: on
@require_torch
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = 'facebook/sam-vit-huge'
UpperCAmelCase__ = pipeline('mask-generation' ,model=lowerCamelCase__ )
UpperCAmelCase__ = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' ,pred_iou_thresh=1 ,points_per_batch=256 )
# Shortening by hashing
UpperCAmelCase__ = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(lowerCamelCase__ ,decimals=4 ) ,[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] ,)
| 98 | 1 |
"""simple docstring"""
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = len(lowerCamelCase ) + 1
UpperCAmelCase__ = len(lowerCamelCase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
UpperCAmelCase__ = [[0 for i in range(lowerCamelCase )] for j in range(lowerCamelCase )]
# since string of zero length match pattern of zero length
UpperCAmelCase__ = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , lowerCamelCase ):
UpperCAmelCase__ = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , lowerCamelCase ):
UpperCAmelCase__ = dp[0][j - 2] if pattern[j - 1] == '*' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , lowerCamelCase ):
for j in range(1 , lowerCamelCase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
UpperCAmelCase__ = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
UpperCAmelCase__ = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
UpperCAmelCase__ = dp[i - 1][j]
else:
UpperCAmelCase__ = 0
else:
UpperCAmelCase__ = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
lowerCAmelCase__ : List[Any] = 'aab'
lowerCAmelCase__ : List[Any] = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
| 98 | """simple docstring"""
import functools
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = len(lowerCamelCase )
UpperCAmelCase__ = len(lowerCamelCase )
@functools.cache
def min_distance(lowerCamelCase , lowerCamelCase ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
UpperCAmelCase__ = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , lowerCamelCase ) , 1 + min_distance(lowerCamelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if index == r:
for j in range(lowerCamelCase ):
print(data[j] , end=' ' )
print(' ' )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
UpperCAmelCase__ = arr[i]
combination_util(lowerCamelCase , lowerCamelCase , lowerCamelCase , index + 1 , lowerCamelCase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
# A temporary array to store all combination one by one
UpperCAmelCase__ = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(lowerCamelCase , lowerCamelCase , lowerCamelCase , 0 , lowerCamelCase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
lowerCAmelCase__ : str = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 98 | """simple docstring"""
from PIL import Image
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))
def contrast(lowerCamelCase ) -> int:
return int(1_2_8 + factor * (c - 1_2_8) )
return img.point(lowerCamelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
lowerCAmelCase__ : Any = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 98 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
lowerCAmelCase__ : str = logging.get_logger(__name__)
@dataclass
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self : Union[str, Any] ,**lowerCamelCase__ : Optional[Any] ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
UpperCAmelCase__ = deprecated_arg[3:]
setattr(self ,lowerCamelCase__ ,not kwargs.pop(lowerCamelCase__ ) )
logger.warning(
f'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'''
f''' {positive_arg}={kwargs[positive_arg]}''' )
UpperCAmelCase__ = kwargs.pop('torchscript' ,self.torchscript )
UpperCAmelCase__ = kwargs.pop('torch_xla_tpu_print_metrics' ,self.torch_xla_tpu_print_metrics )
UpperCAmelCase__ = kwargs.pop('fp16_opt_level' ,self.fpaa_opt_level )
super().__init__(**lowerCamelCase__ )
snake_case__ = field(default=__UpperCAmelCase , metadata={"help": "Trace the models using torchscript"} )
snake_case__ = field(default=__UpperCAmelCase , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
snake_case__ = field(
default="O1" , metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
} , )
@cached_property
def __lowerCAmelCase ( self : Optional[int] ):
requires_backends(self ,['torch'] )
logger.info('PyTorch: setting up devices' )
if not self.cuda:
UpperCAmelCase__ = torch.device('cpu' )
UpperCAmelCase__ = 0
elif is_torch_tpu_available():
UpperCAmelCase__ = xm.xla_device()
UpperCAmelCase__ = 0
else:
UpperCAmelCase__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
UpperCAmelCase__ = torch.cuda.device_count()
return device, n_gpu
@property
def __lowerCAmelCase ( self : Tuple ):
return is_torch_tpu_available() and self.tpu
@property
def __lowerCAmelCase ( self : Any ):
requires_backends(self ,['torch'] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def __lowerCAmelCase ( self : Dict ):
requires_backends(self ,['torch'] )
return self._setup_devices[0]
@property
def __lowerCAmelCase ( self : int ):
requires_backends(self ,['torch'] )
return self._setup_devices[1]
@property
def __lowerCAmelCase ( self : List[str] ):
return self.n_gpu > 0
| 98 | """simple docstring"""
import os
import sys
import unittest
lowerCAmelCase__ : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCAmelCase__ : Tuple = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
lowerCAmelCase__ : Optional[Any] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = get_test_to_tester_mapping(lowerCamelCase__ )
UpperCAmelCase__ = get_test_to_tester_mapping(lowerCamelCase__ )
UpperCAmelCase__ = {'BertModelTest': 'BertModelTester'}
UpperCAmelCase__ = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = get_model_to_test_mapping(lowerCamelCase__ )
UpperCAmelCase__ = get_model_to_test_mapping(lowerCamelCase__ )
UpperCAmelCase__ = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
UpperCAmelCase__ = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = get_model_to_tester_mapping(lowerCamelCase__ )
UpperCAmelCase__ = get_model_to_tester_mapping(lowerCamelCase__ )
UpperCAmelCase__ = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
UpperCAmelCase__ = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
| 98 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : int = logging.get_logger(__name__)
lowerCAmelCase__ : str = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "xglm"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : Any ,lowerCamelCase__ : Any=256_008 ,lowerCamelCase__ : Optional[Any]=2_048 ,lowerCamelCase__ : List[str]=1_024 ,lowerCamelCase__ : List[str]=4_096 ,lowerCamelCase__ : Tuple=24 ,lowerCamelCase__ : Optional[int]=16 ,lowerCamelCase__ : int="gelu" ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : List[Any]=0.0 ,lowerCamelCase__ : List[str]=0.0 ,lowerCamelCase__ : Optional[Any]=0.0_2 ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=2 ,lowerCamelCase__ : Dict=1 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Tuple=2 ,**lowerCamelCase__ : List[Any] ,):
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = d_model
UpperCAmelCase__ = ffn_dim
UpperCAmelCase__ = num_layers
UpperCAmelCase__ = attention_heads
UpperCAmelCase__ = activation_function
UpperCAmelCase__ = dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = layerdrop
UpperCAmelCase__ = init_std
UpperCAmelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase__ = use_cache
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,decoder_start_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
| 98 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ : str = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[str] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowerCAmelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case :
"""simple docstring"""
def __init__( self : List[str] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[int]=13 ,lowerCamelCase__ : Optional[int]=32 ,lowerCamelCase__ : List[str]=2 ,lowerCamelCase__ : List[Any]=3 ,lowerCamelCase__ : str=16 ,lowerCamelCase__ : Union[str, Any]=[1, 2, 1] ,lowerCamelCase__ : Tuple=[2, 2, 4] ,lowerCamelCase__ : Optional[Any]=2 ,lowerCamelCase__ : Optional[Any]=2.0 ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : List[Any]=0.0 ,lowerCamelCase__ : Tuple=0.0 ,lowerCamelCase__ : Union[str, Any]=0.1 ,lowerCamelCase__ : int="gelu" ,lowerCamelCase__ : Optional[Any]=False ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : Optional[int]=0.0_2 ,lowerCamelCase__ : Optional[int]=1e-5 ,lowerCamelCase__ : Optional[int]=True ,lowerCamelCase__ : int=None ,lowerCamelCase__ : Tuple=True ,lowerCamelCase__ : Optional[Any]=10 ,lowerCamelCase__ : Optional[Any]=8 ,):
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = embed_dim
UpperCAmelCase__ = depths
UpperCAmelCase__ = num_heads
UpperCAmelCase__ = window_size
UpperCAmelCase__ = mlp_ratio
UpperCAmelCase__ = qkv_bias
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = drop_path_rate
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = use_absolute_embeddings
UpperCAmelCase__ = patch_norm
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = is_training
UpperCAmelCase__ = scope
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = encoder_stride
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self : Union[str, Any] ):
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[Any] ):
UpperCAmelCase__ = SwinvaModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase__ )
UpperCAmelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : str ,lowerCamelCase__ : Dict ):
UpperCAmelCase__ = SwinvaForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase__ = 1
UpperCAmelCase__ = SwinvaForMaskedImageModeling(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : int ,lowerCamelCase__ : Union[str, Any] ):
UpperCAmelCase__ = self.type_sequence_label_size
UpperCAmelCase__ = SwinvaForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase__ = model(lowerCamelCase__ ,labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
snake_case__ = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = SwinvaModelTester(self )
UpperCAmelCase__ = ConfigTester(self ,config_class=lowerCamelCase__ ,embed_dim=37 )
def __lowerCAmelCase ( self : Dict ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def __lowerCAmelCase ( self : Any ):
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def __lowerCAmelCase ( self : Tuple ):
pass
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ ,nn.Linear ) )
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(lowerCamelCase__ )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] ,lowerCamelCase__ )
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = True
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
UpperCAmelCase__ = outputs.attentions
UpperCAmelCase__ = len(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase__ ) ,lowerCamelCase__ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase__ = True
UpperCAmelCase__ = config.window_size**2
UpperCAmelCase__ = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
UpperCAmelCase__ = outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
UpperCAmelCase__ = len(lowerCamelCase__ )
# Check attention is always last and order is fine
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
if hasattr(self.model_tester ,'num_hidden_states_types' ):
UpperCAmelCase__ = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCAmelCase__ = 2
self.assertEqual(out_len + added_hidden_states ,len(lowerCamelCase__ ) )
UpperCAmelCase__ = outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : int ,lowerCamelCase__ : Any ,lowerCamelCase__ : Optional[int] ):
UpperCAmelCase__ = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase__ = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )
UpperCAmelCase__ = outputs.hidden_states
UpperCAmelCase__ = getattr(
self.model_tester ,'expected_num_hidden_layers' ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase__ ) ,lowerCamelCase__ )
# Swinv2 has a different seq_length
UpperCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
UpperCAmelCase__ = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase__ ) ,lowerCamelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = reshaped_hidden_states[0].shape
UpperCAmelCase__ = (
reshaped_hidden_states[0].view(lowerCamelCase__ ,lowerCamelCase__ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def __lowerCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
self.check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
self.check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = 3
UpperCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase__ = True
self.check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
self.check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,(padded_height, padded_width) )
def __lowerCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ )
def __lowerCAmelCase ( self : str ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def __lowerCAmelCase ( self : int ):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = SwinvaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = _config_zero_init(lowerCamelCase__ )
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' ,)
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self : Tuple ):
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self : Dict ):
UpperCAmelCase__ = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
lowerCamelCase__ )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
UpperCAmelCase__ = image_processor(images=lowerCamelCase__ ,return_tensors='pt' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ = model(**lowerCamelCase__ )
# verify the logits
UpperCAmelCase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase__ )
UpperCAmelCase__ = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase__ ,atol=1e-4 ) )
| 98 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ : Tuple = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[Any] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : int = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCAmelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
def a_ ( ):
return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )]
lowerCAmelCase__ : Any = generate_large_matrix()
lowerCAmelCase__ : Any = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def a_ ( lowerCamelCase ):
assert all(row == sorted(lowerCamelCase , reverse=lowerCamelCase ) for row in grid )
assert all(list(lowerCamelCase ) == sorted(lowerCamelCase , reverse=lowerCamelCase ) for col in zip(*lowerCamelCase ) )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = 0
UpperCAmelCase__ = len(lowerCamelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
UpperCAmelCase__ = (left + right) // 2
UpperCAmelCase__ = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
UpperCAmelCase__ = mid + 1
else:
UpperCAmelCase__ = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowerCamelCase )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = 0
UpperCAmelCase__ = len(grid[0] )
for i in range(len(lowerCamelCase ) ):
UpperCAmelCase__ = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowerCamelCase ) * len(grid[0] )) - total
def a_ ( lowerCamelCase ):
return len([number for row in grid for number in row if number < 0] )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = 0
for row in grid:
for i, number in enumerate(lowerCamelCase ):
if number < 0:
total += len(lowerCamelCase ) - i
break
return total
def a_ ( ):
from timeit import timeit
print('Running benchmarks' )
UpperCAmelCase__ = (
'from __main__ import count_negatives_binary_search, '
'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
UpperCAmelCase__ = timeit(f'''{func}(grid=grid)''' , setup=lowerCamelCase , number=5_0_0 )
print(f'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 98 | """simple docstring"""
import os
import numpy
import onnx
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = a.name
UpperCAmelCase__ = b.name
UpperCAmelCase__ = ''
UpperCAmelCase__ = ''
UpperCAmelCase__ = a == b
UpperCAmelCase__ = name_a
UpperCAmelCase__ = name_b
return res
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(lowerCamelCase , lowerCamelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase , lowerCamelCase )
_graph_replace_input_with(node_proto.attribute[1].g , lowerCamelCase , lowerCamelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for n in graph_proto.node:
_node_replace_input_with(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = list(model.graph.initializer )
UpperCAmelCase__ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
UpperCAmelCase__ = inits[i].name
UpperCAmelCase__ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = os.path.dirname(lowerCamelCase )
UpperCAmelCase__ = os.path.basename(lowerCamelCase )
UpperCAmelCase__ = onnx.load(os.path.join(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = list(model.graph.initializer )
UpperCAmelCase__ = set()
UpperCAmelCase__ = {}
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
for i in range(len(lowerCamelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(lowerCamelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(lowerCamelCase )
dup_set.add(lowerCamelCase )
UpperCAmelCase__ = inits[j].data_type
UpperCAmelCase__ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 1_1:
mem_size *= 8
else:
print('unexpected data type: ' , lowerCamelCase )
total_reduced_size += mem_size
UpperCAmelCase__ = inits[i].name
UpperCAmelCase__ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowerCamelCase )
else:
UpperCAmelCase__ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' )
UpperCAmelCase__ = sorted(lowerCamelCase )
_remove_dup_initializers_from_model(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = 'optimized_' + model_file_name
UpperCAmelCase__ = os.path.join(lowerCamelCase , lowerCamelCase )
onnx.save(lowerCamelCase , lowerCamelCase )
return new_model
| 98 | 1 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class snake_case ( __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = 1
@register_to_config
def __init__( self : str ,lowerCamelCase__ : int=2_000 ,lowerCamelCase__ : Optional[Any]=0.1 ,lowerCamelCase__ : Optional[int]=20 ,lowerCamelCase__ : Union[str, Any]=1e-3 ):
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : str ,lowerCamelCase__ : Union[str, torch.device] = None ):
UpperCAmelCase__ = torch.linspace(1 ,self.config.sampling_eps ,lowerCamelCase__ ,device=lowerCamelCase__ )
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Any ,lowerCamelCase__ : int=None ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
UpperCAmelCase__ = (
-0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
UpperCAmelCase__ = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
UpperCAmelCase__ = std.flatten()
while len(std.shape ) < len(score.shape ):
UpperCAmelCase__ = std.unsqueeze(-1 )
UpperCAmelCase__ = -score / std
# compute
UpperCAmelCase__ = -1.0 / len(self.timesteps )
UpperCAmelCase__ = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
UpperCAmelCase__ = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
UpperCAmelCase__ = beta_t.unsqueeze(-1 )
UpperCAmelCase__ = -0.5 * beta_t * x
UpperCAmelCase__ = torch.sqrt(lowerCamelCase__ )
UpperCAmelCase__ = drift - diffusion**2 * score
UpperCAmelCase__ = x + drift * dt
# add noise
UpperCAmelCase__ = randn_tensor(x.shape ,layout=x.layout ,generator=lowerCamelCase__ ,device=x.device ,dtype=x.dtype )
UpperCAmelCase__ = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : Optional[Any] ):
return self.config.num_train_timesteps
| 98 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = ShapEImgaImgPipeline
snake_case__ = ["image"]
snake_case__ = ["image"]
snake_case__ = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
snake_case__ = False
@property
def __lowerCAmelCase ( self : List[str] ):
return 32
@property
def __lowerCAmelCase ( self : str ):
return 32
@property
def __lowerCAmelCase ( self : int ):
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self : List[Any] ):
return 8
@property
def __lowerCAmelCase ( self : Optional[int] ):
torch.manual_seed(0 )
UpperCAmelCase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,)
UpperCAmelCase__ = CLIPVisionModel(lowerCamelCase__ )
return model
@property
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = CLIPImageProcessor(
crop_size=224 ,do_center_crop=lowerCamelCase__ ,do_normalize=lowerCamelCase__ ,do_resize=lowerCamelCase__ ,image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] ,image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] ,resample=3 ,size=224 ,)
return image_processor
@property
def __lowerCAmelCase ( self : str ):
torch.manual_seed(0 )
UpperCAmelCase__ = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
UpperCAmelCase__ = PriorTransformer(**lowerCamelCase__ )
return model
@property
def __lowerCAmelCase ( self : Tuple ):
torch.manual_seed(0 )
UpperCAmelCase__ = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
UpperCAmelCase__ = ShapERenderer(**lowerCamelCase__ )
return model
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = self.dummy_prior
UpperCAmelCase__ = self.dummy_image_encoder
UpperCAmelCase__ = self.dummy_image_processor
UpperCAmelCase__ = self.dummy_renderer
UpperCAmelCase__ = HeunDiscreteScheduler(
beta_schedule='exp' ,num_train_timesteps=1_024 ,prediction_type='sample' ,use_karras_sigmas=lowerCamelCase__ ,clip_sample=lowerCamelCase__ ,clip_sample_range=1.0 ,)
UpperCAmelCase__ = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : Any ,lowerCamelCase__ : str=0 ):
UpperCAmelCase__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith('mps' ):
UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ )
else:
UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
UpperCAmelCase__ = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = 'cpu'
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
UpperCAmelCase__ = output.images[0]
UpperCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCAmelCase__ = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Tuple ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = torch_device == 'cpu'
UpperCAmelCase__ = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=lowerCamelCase__ ,relax_max_difference=lowerCamelCase__ ,)
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = self.get_dummy_inputs(lowerCamelCase__ )
for key in inputs.keys():
if key in self.batch_params:
UpperCAmelCase__ = batch_size * [inputs[key]]
UpperCAmelCase__ = pipe(**lowerCamelCase__ ,num_images_per_prompt=lowerCamelCase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
UpperCAmelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
UpperCAmelCase__ = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
UpperCAmelCase__ = pipe(
lowerCamelCase__ ,generator=lowerCamelCase__ ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type='np' ,).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCamelCase__ ,lowerCamelCase__ )
| 98 | 1 |
"""simple docstring"""
def a_ ( lowerCamelCase ):
return str(lowerCamelCase ) == str(lowerCamelCase )[::-1]
def a_ ( lowerCamelCase ):
return int(lowerCamelCase ) + int(str(lowerCamelCase )[::-1] )
def a_ ( lowerCamelCase = 1_0_0_0_0 ):
UpperCAmelCase__ = []
for num in range(1 , lowerCamelCase ):
UpperCAmelCase__ = 0
UpperCAmelCase__ = num
while iterations < 5_0:
UpperCAmelCase__ = sum_reverse(lowerCamelCase )
iterations += 1
if is_palindrome(lowerCamelCase ):
break
else:
lychrel_nums.append(lowerCamelCase )
return len(lowerCamelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 98 | """simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] ,lowerCamelCase__ : int="</s>" ,lowerCamelCase__ : str="<unk>" ,lowerCamelCase__ : Union[str, Any]="<pad>" ,lowerCamelCase__ : int=125 ,lowerCamelCase__ : str=None ,**lowerCamelCase__ : Union[str, Any] ,):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase__ = [f'''<extra_id_{i}>''' for i in range(lowerCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCAmelCase__ = len(set(filter(lambda lowerCamelCase__ : bool('extra_id' in str(lowerCamelCase__ ) ) ,lowerCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
' extra_ids tokens' )
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else pad_token
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else eos_token
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else unk_token
super().__init__(
eos_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,extra_ids=lowerCamelCase__ ,additional_special_tokens=lowerCamelCase__ ,**lowerCamelCase__ ,)
UpperCAmelCase__ = extra_ids
UpperCAmelCase__ = 2**8 # utf is 8 bits
# define special tokens dict
UpperCAmelCase__ = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
UpperCAmelCase__ = len(self.special_tokens_encoder )
UpperCAmelCase__ = len(lowerCamelCase__ )
for i, token in enumerate(lowerCamelCase__ ):
UpperCAmelCase__ = self.vocab_size + i - n
UpperCAmelCase__ = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ,lowerCamelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ ,token_ids_a=lowerCamelCase__ ,already_has_special_tokens=lowerCamelCase__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowerCamelCase__ )) + [1]
return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[int] ):
if len(lowerCamelCase__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
UpperCAmelCase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
UpperCAmelCase__ = self._add_eos_if_not_present(lowerCamelCase__ )
if token_ids_a is None:
return token_ids_a
else:
UpperCAmelCase__ = self._add_eos_if_not_present(lowerCamelCase__ )
return token_ids_a + token_ids_a
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : str ):
UpperCAmelCase__ = [chr(lowerCamelCase__ ) for i in text.encode('utf-8' )]
return tokens
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : str ):
if token in self.special_tokens_encoder:
UpperCAmelCase__ = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
UpperCAmelCase__ = self.added_tokens_encoder[token]
elif len(lowerCamelCase__ ) != 1:
UpperCAmelCase__ = self.unk_token_id
else:
UpperCAmelCase__ = ord(lowerCamelCase__ ) + self._num_special_tokens
return token_id
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Any ):
if index in self.special_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[index]
else:
UpperCAmelCase__ = chr(index - self._num_special_tokens )
return token
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
UpperCAmelCase__ = b''
for token in tokens:
if token in self.special_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.added_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.special_tokens_encoder:
UpperCAmelCase__ = token.encode('utf-8' )
elif token in self.added_tokens_encoder:
UpperCAmelCase__ = token.encode('utf-8' )
else:
UpperCAmelCase__ = bytes([ord(lowerCamelCase__ )] )
bstring += tok_string
UpperCAmelCase__ = bstring.decode('utf-8' ,errors='ignore' )
return string
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ):
return ()
| 98 | 1 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCAmelCase__ : List[str] = open # noqa: we just need to have a builtin inside this module to test it properly
| 98 | """simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class snake_case :
"""simple docstring"""
snake_case__ = 42
snake_case__ = None
snake_case__ = None
lowerCAmelCase__ : Union[str, Any] = namedtuple('CoinsDistribResult', 'moves excess')
def a_ ( lowerCamelCase ):
if root is None:
return 0
# Validation
def count_nodes(lowerCamelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(lowerCamelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(lowerCamelCase ) != count_coins(lowerCamelCase ):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
def get_distrib(lowerCamelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
UpperCAmelCase__ , UpperCAmelCase__ = get_distrib(node.left )
UpperCAmelCase__ , UpperCAmelCase__ = get_distrib(node.right )
UpperCAmelCase__ = 1 - left_distrib_excess
UpperCAmelCase__ = 1 - right_distrib_excess
UpperCAmelCase__ = (
left_distrib_moves
+ right_distrib_moves
+ abs(lowerCamelCase )
+ abs(lowerCamelCase )
)
UpperCAmelCase__ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(lowerCamelCase , lowerCamelCase )
return get_distrib(lowerCamelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCAmelCase__ : List[Any] = logging.get_logger(__name__)
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
def __init__( self : List[str] ,lowerCamelCase__ : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
super().__init__()
UpperCAmelCase__ = nn.ModuleList(lowerCamelCase__ )
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : torch.FloatTensor ,lowerCamelCase__ : Union[torch.Tensor, float, int] ,lowerCamelCase__ : torch.Tensor ,lowerCamelCase__ : List[torch.tensor] ,lowerCamelCase__ : List[float] ,lowerCamelCase__ : Optional[torch.Tensor] = None ,lowerCamelCase__ : Optional[torch.Tensor] = None ,lowerCamelCase__ : Optional[torch.Tensor] = None ,lowerCamelCase__ : Optional[Dict[str, Any]] = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = True ,):
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase__ ,lowerCamelCase__ ,self.nets ) ):
UpperCAmelCase__ , UpperCAmelCase__ = controlnet(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,)
# merge samples
if i == 0:
UpperCAmelCase__ , UpperCAmelCase__ = down_samples, mid_sample
else:
UpperCAmelCase__ = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCamelCase__ ,lowerCamelCase__ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Union[str, os.PathLike] ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Callable = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[str] = None ,):
UpperCAmelCase__ = 0
UpperCAmelCase__ = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCamelCase__ ,is_main_process=lowerCamelCase__ ,save_function=lowerCamelCase__ ,safe_serialization=lowerCamelCase__ ,variant=lowerCamelCase__ ,)
idx += 1
UpperCAmelCase__ = model_path_to_save + f'''_{idx}'''
@classmethod
def __lowerCAmelCase ( cls : str ,lowerCamelCase__ : Optional[Union[str, os.PathLike]] ,**lowerCamelCase__ : str ):
UpperCAmelCase__ = 0
UpperCAmelCase__ = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
UpperCAmelCase__ = pretrained_model_path
while os.path.isdir(lowerCamelCase__ ):
UpperCAmelCase__ = ControlNetModel.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
controlnets.append(lowerCamelCase__ )
idx += 1
UpperCAmelCase__ = pretrained_model_path + f'''_{idx}'''
logger.info(f'''{len(lowerCamelCase__ )} controlnets loaded from {pretrained_model_path}.''' )
if len(lowerCamelCase__ ) == 0:
raise ValueError(
f'''No ControlNets found under {os.path.dirname(lowerCamelCase__ )}. Expected at least {pretrained_model_path + "_0"}.''' )
return cls(lowerCamelCase__ )
| 98 | """simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = (PNDMScheduler,)
snake_case__ = (("num_inference_steps", 50),)
def __lowerCAmelCase ( self : List[str] ,**lowerCamelCase__ : str ):
UpperCAmelCase__ = {
'num_train_timesteps': 1_000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
}
config.update(**lowerCamelCase__ )
return config
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Optional[Any]=0 ,**lowerCamelCase__ : List[str] ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : Tuple ):
pass
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str]=0 ,**lowerCamelCase__ : Tuple ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : List[Any] ,**lowerCamelCase__ : int ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = 10
UpperCAmelCase__ = self.dummy_model()
UpperCAmelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
return sample
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase__ ,'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase__ ,'set_timesteps' ):
UpperCAmelCase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCAmelCase ( self : List[Any] ):
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[int] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCamelCase__ )
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps ,torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,)
def __lowerCAmelCase ( self : Dict ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowerCamelCase__ ,beta_end=lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase__ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
def __lowerCAmelCase ( self : int ):
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.full_loop()
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.full_loop(prediction_type='v_prediction' )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def __lowerCAmelCase ( self : Union[str, Any] ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
| 98 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ : Tuple = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[Any] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : int = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCAmelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | """simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase__ : Optional[Any] = logging.getLogger()
def a_ ( ):
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('-f' )
UpperCAmelCase__ = parser.parse_args()
return args.f
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
UpperCAmelCase__ = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 ,'run_glue_deebert.py' )
with patch.object(lowerCamelCase__ ,'argv' ,lowerCamelCase__ ):
UpperCAmelCase__ = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCamelCase__ ,0.6_6_6 )
@slow
@require_torch_non_multi_gpu
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
self.run_and_check(lowerCamelCase__ )
UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCamelCase__ )
UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCamelCase__ )
| 98 | 1 |
"""simple docstring"""
def a_ ( lowerCamelCase = 1_0**1_2 ):
UpperCAmelCase__ = 1
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
UpperCAmelCase__ = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(F"""{solution() = }""")
| 98 | """simple docstring"""
import argparse
lowerCAmelCase__ : List[str] = 'docs/source/_static/js/custom.js'
def a_ ( lowerCamelCase ):
with open(lowerCamelCase , encoding='utf-8' , newline='\n' ) as f:
UpperCAmelCase__ = f.readlines()
UpperCAmelCase__ = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
UpperCAmelCase__ = f'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += f''' "v{version}": "v{version}",\n'''
with open(lowerCamelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--version', help='Release version.')
lowerCAmelCase__ : Optional[int] = parser.parse_args()
update_custom_js(args.version)
| 98 | 1 |
"""simple docstring"""
def a_ ( lowerCamelCase ):
if n == 1 or not isinstance(lowerCamelCase , lowerCamelCase ):
return 0
elif n == 2:
return 1
else:
UpperCAmelCase__ = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = 0
UpperCAmelCase__ = 2
while digits < n:
index += 1
UpperCAmelCase__ = len(str(fibonacci(lowerCamelCase ) ) )
return index
def a_ ( lowerCamelCase = 1_0_0_0 ):
return fibonacci_digits_index(lowerCamelCase )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 98 | """simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def a_ ( lowerCamelCase ):
return np.dot(lowerCamelCase , lowerCamelCase )
class snake_case :
"""simple docstring"""
def __init__( self : int ,*,
lowerCamelCase__ : float = np.inf ,lowerCamelCase__ : str = "linear" ,lowerCamelCase__ : float = 0.0 ,):
UpperCAmelCase__ = regularization
UpperCAmelCase__ = gamma
if kernel == "linear":
UpperCAmelCase__ = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma' )
if not isinstance(self.gamma ,(float, int) ):
raise ValueError('gamma must be float or int' )
if not self.gamma > 0:
raise ValueError('gamma must be > 0' )
UpperCAmelCase__ = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
UpperCAmelCase__ = f'''Unknown kernel: {kernel}'''
raise ValueError(lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : ndarray ,lowerCamelCase__ : ndarray ):
return np.dot(lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : ndarray ,lowerCamelCase__ : ndarray ):
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : list[ndarray] ,lowerCamelCase__ : ndarray ):
UpperCAmelCase__ = observations
UpperCAmelCase__ = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((UpperCAmelCase__) , ) = np.shape(lowerCamelCase__ )
def to_minimize(lowerCamelCase__ : ndarray ) -> float:
UpperCAmelCase__ = 0
((UpperCAmelCase__) , ) = np.shape(lowerCamelCase__ )
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] ,observations[j] )
)
return 1 / 2 * s - sum(lowerCamelCase__ )
UpperCAmelCase__ = LinearConstraint(lowerCamelCase__ ,0 ,0 )
UpperCAmelCase__ = Bounds(0 ,self.regularization )
UpperCAmelCase__ = minimize(
lowerCamelCase__ ,np.ones(lowerCamelCase__ ) ,bounds=lowerCamelCase__ ,constraints=[ly_contraint] ).x
UpperCAmelCase__ = l_star
# calculating mean offset of separation plane to points
UpperCAmelCase__ = 0
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] ,observations[j] )
UpperCAmelCase__ = s / n
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : ndarray ):
UpperCAmelCase__ = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] ,lowerCamelCase__ )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "Speech2TextFeatureExtractor"
snake_case__ = "Speech2TextTokenizer"
def __init__( self : List[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Tuple ):
super().__init__(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = self.feature_extractor
UpperCAmelCase__ = False
def __call__( self : Optional[int] ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : Tuple ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase__ ,**lowerCamelCase__ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
UpperCAmelCase__ = kwargs.pop('raw_speech' )
else:
UpperCAmelCase__ = kwargs.pop('audio' ,lowerCamelCase__ )
UpperCAmelCase__ = kwargs.pop('sampling_rate' ,lowerCamelCase__ )
UpperCAmelCase__ = kwargs.pop('text' ,lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
UpperCAmelCase__ = args[0]
UpperCAmelCase__ = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
UpperCAmelCase__ = self.feature_extractor(lowerCamelCase__ ,*lowerCamelCase__ ,sampling_rate=lowerCamelCase__ ,**lowerCamelCase__ )
if text is not None:
UpperCAmelCase__ = self.tokenizer(lowerCamelCase__ ,**lowerCamelCase__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
UpperCAmelCase__ = encodings['input_ids']
return inputs
def __lowerCAmelCase ( self : Tuple ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : List[str] ):
return self.tokenizer.batch_decode(*lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : List[str] ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : str ):
return self.tokenizer.decode(*lowerCamelCase__ ,**lowerCamelCase__ )
@contextmanager
def __lowerCAmelCase ( self : Tuple ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
UpperCAmelCase__ = True
UpperCAmelCase__ = self.tokenizer
yield
UpperCAmelCase__ = self.feature_extractor
UpperCAmelCase__ = False
| 98 | """simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCAmelCase__ : List[Any] = '\\n\n'
lowerCAmelCase__ : Tuple = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
lowerCAmelCase__ : str = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def __lowerCAmelCase ( self : str ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) ,reference_urls=['https://huggingface.co/docs/transformers/perplexity'] ,)
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : List[str]=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
UpperCAmelCase__ = 'cuda'
else:
UpperCAmelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = model.to(lowerCamelCase__ )
UpperCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase__ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(lowerCamelCase__ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
UpperCAmelCase__ = model.config.max_length - 1
else:
UpperCAmelCase__ = model.config.max_length
UpperCAmelCase__ = tokenizer(
lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors='pt' ,return_attention_mask=lowerCamelCase__ ,).to(lowerCamelCase__ )
UpperCAmelCase__ = encodings['input_ids']
UpperCAmelCase__ = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) ,1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) ,2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
UpperCAmelCase__ = []
UpperCAmelCase__ = CrossEntropyLoss(reduction='none' )
for start_index in logging.tqdm(range(0 ,len(lowerCamelCase__ ) ,lowerCamelCase__ ) ):
UpperCAmelCase__ = min(start_index + batch_size ,len(lowerCamelCase__ ) )
UpperCAmelCase__ = encoded_texts[start_index:end_index]
UpperCAmelCase__ = attn_masks[start_index:end_index]
if add_start_token:
UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowerCamelCase__ )
UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] ,dim=1 )
UpperCAmelCase__ = torch.cat(
[torch.ones(bos_tokens_tensor.size() ,dtype=torch.intaa ).to(lowerCamelCase__ ), attn_mask] ,dim=1 )
UpperCAmelCase__ = encoded_batch
with torch.no_grad():
UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ).logits
UpperCAmelCase__ = out_logits[..., :-1, :].contiguous()
UpperCAmelCase__ = labels[..., 1:].contiguous()
UpperCAmelCase__ = attn_mask[..., 1:].contiguous()
UpperCAmelCase__ = torch.expa(
(loss_fct(shift_logits.transpose(1 ,2 ) ,lowerCamelCase__ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(lowerCamelCase__ )}
| 98 | 1 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
lowerCAmelCase__ : Optional[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
lowerCAmelCase__ : List[str] = [0, 25, 50]
lowerCAmelCase__ : List[Any] = [25, 50, 75]
lowerCAmelCase__ : Any = fuzz.membership.trimf(X, abca)
lowerCAmelCase__ : Tuple = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
lowerCAmelCase__ : Optional[Any] = np.ones(75)
lowerCAmelCase__ : List[Any] = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
lowerCAmelCase__ : List[Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
lowerCAmelCase__ : Optional[Any] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
lowerCAmelCase__ : Tuple = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
lowerCAmelCase__ : Tuple = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
lowerCAmelCase__ : List[str] = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
lowerCAmelCase__ : int = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
lowerCAmelCase__ : Dict = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
lowerCAmelCase__ : Union[str, Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 98 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : int = logging.get_logger(__name__)
lowerCAmelCase__ : str = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "xglm"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : Any ,lowerCamelCase__ : Any=256_008 ,lowerCamelCase__ : Optional[Any]=2_048 ,lowerCamelCase__ : List[str]=1_024 ,lowerCamelCase__ : List[str]=4_096 ,lowerCamelCase__ : Tuple=24 ,lowerCamelCase__ : Optional[int]=16 ,lowerCamelCase__ : int="gelu" ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : List[Any]=0.0 ,lowerCamelCase__ : List[str]=0.0 ,lowerCamelCase__ : Optional[Any]=0.0_2 ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=2 ,lowerCamelCase__ : Dict=1 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Tuple=2 ,**lowerCamelCase__ : List[Any] ,):
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = d_model
UpperCAmelCase__ = ffn_dim
UpperCAmelCase__ = num_layers
UpperCAmelCase__ = attention_heads
UpperCAmelCase__ = activation_function
UpperCAmelCase__ = dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = layerdrop
UpperCAmelCase__ = init_std
UpperCAmelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase__ = use_cache
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,decoder_start_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
| 98 | 1 |
"""simple docstring"""
class snake_case :
"""simple docstring"""
def __init__( self : str ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Tuple ):
UpperCAmelCase__ = name
UpperCAmelCase__ = value
UpperCAmelCase__ = weight
def __repr__( self : List[Any] ):
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def __lowerCAmelCase ( self : Tuple ):
return self.value
def __lowerCAmelCase ( self : Tuple ):
return self.name
def __lowerCAmelCase ( self : Optional[int] ):
return self.weight
def __lowerCAmelCase ( self : int ):
return self.value / self.weight
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = []
for i in range(len(lowerCamelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = sorted(lowerCamelCase , key=lowerCamelCase , reverse=lowerCamelCase )
UpperCAmelCase__ = []
UpperCAmelCase__ , UpperCAmelCase__ = 0.0, 0.0
for i in range(len(lowerCamelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def a_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | """simple docstring"""
import math
def a_ ( lowerCamelCase , lowerCamelCase ):
if (
not isinstance(lowerCamelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * power_factor
def a_ ( lowerCamelCase , lowerCamelCase ):
if (
not isinstance(lowerCamelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
def __init__( self : List[str] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : Optional[Any] ):
super().__init__(*lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Dict ):
UpperCAmelCase__ = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(lowerCamelCase__ )
UpperCAmelCase__ = self.values[key]
def __lowerCAmelCase ( self : Optional[Any] ):
return (
sum(self.charge_factor - len(lowerCamelCase__ ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : List[str]=None ):
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(lowerCamelCase__ ) == 0
):
return key
return super()._collision_resolution(lowerCamelCase__ ,lowerCamelCase__ )
| 98 | """simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
lowerCAmelCase__ : Optional[int] = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
lowerCAmelCase__ : str = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
lowerCAmelCase__ : int = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
lowerCAmelCase__ : Dict = F"""down_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : Union[str, Any] = F"""input_blocks.{3*i + j + 1}.0."""
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
lowerCAmelCase__ : Tuple = F"""down_blocks.{i}.attentions.{j}."""
lowerCAmelCase__ : int = F"""input_blocks.{3*i + j + 1}.1."""
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
lowerCAmelCase__ : Tuple = F"""up_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : int = F"""output_blocks.{3*i + j}.0."""
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
lowerCAmelCase__ : Any = F"""up_blocks.{i}.attentions.{j}."""
lowerCAmelCase__ : List[str] = F"""output_blocks.{3*i + j}.1."""
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
lowerCAmelCase__ : List[str] = F"""down_blocks.{i}.downsamplers.0.conv."""
lowerCAmelCase__ : Union[str, Any] = F"""input_blocks.{3*(i+1)}.0.op."""
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
lowerCAmelCase__ : Dict = F"""up_blocks.{i}.upsamplers.0."""
lowerCAmelCase__ : List[Any] = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
lowerCAmelCase__ : str = 'mid_block.attentions.0.'
lowerCAmelCase__ : Union[str, Any] = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
lowerCAmelCase__ : int = F"""mid_block.resnets.{j}."""
lowerCAmelCase__ : List[str] = F"""middle_block.{2*j}."""
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def a_ ( lowerCamelCase ):
# buyer beware: this is a *brittle* function,
# and correct output requires that all of these pieces interact in
# the exact order in which I have arranged them.
UpperCAmelCase__ = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
UpperCAmelCase__ = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
UpperCAmelCase__ = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
lowerCAmelCase__ : str = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
lowerCAmelCase__ : List[str] = F"""encoder.down_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : List[Any] = F"""encoder.down.{i}.block.{j}."""
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
lowerCAmelCase__ : Dict = F"""down_blocks.{i}.downsamplers.0."""
lowerCAmelCase__ : str = F"""down.{i}.downsample."""
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
lowerCAmelCase__ : int = F"""up_blocks.{i}.upsamplers.0."""
lowerCAmelCase__ : str = F"""up.{3-i}.upsample."""
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
lowerCAmelCase__ : Dict = F"""decoder.up_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : Optional[int] = F"""decoder.up.{3-i}.block.{j}."""
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
lowerCAmelCase__ : Any = F"""mid_block.resnets.{i}."""
lowerCAmelCase__ : Any = F"""mid.block_{i+1}."""
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
lowerCAmelCase__ : str = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def a_ ( lowerCamelCase ):
# convert HF linear weights to SD conv2d weights
return w.reshape(*w.shape , 1 , 1 )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
UpperCAmelCase__ = {v: vae_state_dict[k] for k, v in mapping.items()}
UpperCAmelCase__ = ['q', 'k', 'v', 'proj_out']
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f'''mid.attn_1.{weight_name}.weight''' in k:
print(f'''Reshaping {k} for SD format''' )
UpperCAmelCase__ = reshape_weight_for_sd(lowerCamelCase )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
lowerCAmelCase__ : Optional[int] = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
lowerCAmelCase__ : List[Any] = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCAmelCase__ : int = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCAmelCase__ : Optional[int] = {'q': 0, 'k': 1, 'v': 2}
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
for k, v in text_enc_dict.items():
if (
k.endswith('.self_attn.q_proj.weight' )
or k.endswith('.self_attn.k_proj.weight' )
or k.endswith('.self_attn.v_proj.weight' )
):
UpperCAmelCase__ = k[: -len('.q_proj.weight' )]
UpperCAmelCase__ = k[-len('q_proj.weight' )]
if k_pre not in capture_qkv_weight:
UpperCAmelCase__ = [None, None, None]
UpperCAmelCase__ = v
continue
if (
k.endswith('.self_attn.q_proj.bias' )
or k.endswith('.self_attn.k_proj.bias' )
or k.endswith('.self_attn.v_proj.bias' )
):
UpperCAmelCase__ = k[: -len('.q_proj.bias' )]
UpperCAmelCase__ = k[-len('q_proj.bias' )]
if k_pre not in capture_qkv_bias:
UpperCAmelCase__ = [None, None, None]
UpperCAmelCase__ = v
continue
UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
UpperCAmelCase__ = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
UpperCAmelCase__ = torch.cat(lowerCamelCase )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
UpperCAmelCase__ = torch.cat(lowerCamelCase )
return new_state_dict
def a_ ( lowerCamelCase ):
return text_enc_dict
if __name__ == "__main__":
lowerCAmelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
lowerCAmelCase__ : Optional[int] = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCAmelCase__ : Tuple = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
lowerCAmelCase__ : List[str] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
lowerCAmelCase__ : int = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
lowerCAmelCase__ : Union[str, Any] = load_file(unet_path, device='cpu')
else:
lowerCAmelCase__ : str = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
lowerCAmelCase__ : Dict = torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
lowerCAmelCase__ : Optional[Any] = load_file(vae_path, device='cpu')
else:
lowerCAmelCase__ : Optional[int] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
lowerCAmelCase__ : List[str] = torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
lowerCAmelCase__ : Tuple = load_file(text_enc_path, device='cpu')
else:
lowerCAmelCase__ : Any = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
lowerCAmelCase__ : Any = torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
lowerCAmelCase__ : Any = convert_unet_state_dict(unet_state_dict)
lowerCAmelCase__ : Dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCAmelCase__ : List[Any] = convert_vae_state_dict(vae_state_dict)
lowerCAmelCase__ : str = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCAmelCase__ : List[Any] = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCAmelCase__ : Tuple = {'transformer.' + k: v for k, v in text_enc_dict.items()}
lowerCAmelCase__ : List[str] = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCAmelCase__ : str = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
lowerCAmelCase__ : Optional[Any] = convert_text_enc_state_dict(text_enc_dict)
lowerCAmelCase__ : Optional[Any] = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCAmelCase__ : List[Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCAmelCase__ : int = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCAmelCase__ : List[str] = {'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 98 | 1 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def a_ ( lowerCamelCase ):
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
lowerCAmelCase__ : List[Any] = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
@staticmethod
def __lowerCAmelCase ( lowerCamelCase__ : ArgumentParser ):
UpperCAmelCase__ = parser.add_parser(
'convert' ,help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.' ,)
train_parser.add_argument('--model_type' ,type=lowerCamelCase__ ,required=lowerCamelCase__ ,help='Model\'s type.' )
train_parser.add_argument(
'--tf_checkpoint' ,type=lowerCamelCase__ ,required=lowerCamelCase__ ,help='TensorFlow checkpoint path or folder.' )
train_parser.add_argument(
'--pytorch_dump_output' ,type=lowerCamelCase__ ,required=lowerCamelCase__ ,help='Path to the PyTorch saved model output.' )
train_parser.add_argument('--config' ,type=lowerCamelCase__ ,default='' ,help='Configuration file path or folder.' )
train_parser.add_argument(
'--finetuning_task_name' ,type=lowerCamelCase__ ,default=lowerCamelCase__ ,help='Optional fine-tuning task name if the TF model was a finetuned model.' ,)
train_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self : List[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : str ,lowerCamelCase__ : str ,lowerCamelCase__ : str ,lowerCamelCase__ : str ,*lowerCamelCase__ : Any ,):
UpperCAmelCase__ = logging.get_logger('transformers-cli/converting' )
self._logger.info(f'''Loading model {model_type}''' )
UpperCAmelCase__ = model_type
UpperCAmelCase__ = tf_checkpoint
UpperCAmelCase__ = pytorch_dump_output
UpperCAmelCase__ = config
UpperCAmelCase__ = finetuning_task_name
def __lowerCAmelCase ( self : str ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(lowerCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase__ )
if "ckpt" in self._tf_checkpoint.lower():
UpperCAmelCase__ = self._tf_checkpoint
UpperCAmelCase__ = ''
else:
UpperCAmelCase__ = self._tf_checkpoint
UpperCAmelCase__ = ''
convert_transfo_xl_checkpoint_to_pytorch(
lowerCamelCase__ ,self._config ,self._pytorch_dump_output ,lowerCamelCase__ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase__ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase__ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint ,self._config ,self._pytorch_dump_output ,self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
else:
raise ValueError(
'--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]' )
| 98 | """simple docstring"""
def a_ ( lowerCamelCase , lowerCamelCase ):
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
UpperCAmelCase__ = str(bin(lowerCamelCase ) )[2:] # remove the leading "0b"
UpperCAmelCase__ = str(bin(lowerCamelCase ) )[2:] # remove the leading "0b"
UpperCAmelCase__ = max(len(lowerCamelCase ) , len(lowerCamelCase ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(lowerCamelCase ) , b_binary.zfill(lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowerCAmelCase__ : Union[str, Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowerCAmelCase__ : Any = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowerCAmelCase__ : Union[str, Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = len([g for position, g in enumerate(lowerCamelCase ) if g == main_target[position]] )
return (item, float(lowerCamelCase ))
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = random.randint(0 , len(lowerCamelCase ) - 1 )
UpperCAmelCase__ = parent_a[:random_slice] + parent_a[random_slice:]
UpperCAmelCase__ = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = list(lowerCamelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
UpperCAmelCase__ = random.choice(lowerCamelCase )
return "".join(lowerCamelCase )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
UpperCAmelCase__ = []
# Generate more children proportionally to the fitness score.
UpperCAmelCase__ = int(parent_a[1] * 1_0_0 ) + 1
UpperCAmelCase__ = 1_0 if child_n >= 1_0 else child_n
for _ in range(lowerCamelCase ):
UpperCAmelCase__ = population_score[random.randint(0 , lowerCamelCase )][0]
UpperCAmelCase__ , UpperCAmelCase__ = crossover(parent_a[0] , lowerCamelCase )
# Append new string to the population list.
pop.append(mutate(lowerCamelCase , lowerCamelCase ) )
pop.append(mutate(lowerCamelCase , lowerCamelCase ) )
return pop
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
UpperCAmelCase__ = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
raise ValueError(lowerCamelCase )
# Verify that the target contains no genes besides the ones inside genes variable.
UpperCAmelCase__ = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
UpperCAmelCase__ = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
raise ValueError(lowerCamelCase )
# Generate random starting population.
UpperCAmelCase__ = []
for _ in range(lowerCamelCase ):
population.append(''.join([random.choice(lowerCamelCase ) for i in range(len(lowerCamelCase ) )] ) )
# Just some logs to know what the algorithms is doing.
UpperCAmelCase__ , UpperCAmelCase__ = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(lowerCamelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
UpperCAmelCase__ = [evaluate(lowerCamelCase , lowerCamelCase ) for item in population]
# Check if there is a matching evolution.
UpperCAmelCase__ = sorted(lowerCamelCase , key=lambda lowerCamelCase : x[1] , reverse=lowerCamelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 1_0 == 0:
print(
f'''\nGeneration: {generation}'''
f'''\nTotal Population:{total_population}'''
f'''\nBest score: {population_score[0][1]}'''
f'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
UpperCAmelCase__ = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(lowerCamelCase )
# Normalize population score to be between 0 and 1.
UpperCAmelCase__ = [
(item, score / len(lowerCamelCase )) for item, score in population_score
]
# This is selection
for i in range(lowerCamelCase ):
population.extend(select(population_score[int(lowerCamelCase )] , lowerCamelCase , lowerCamelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(lowerCamelCase ) > N_POPULATION:
break
if __name__ == "__main__":
lowerCAmelCase__ : Optional[Any] = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
lowerCAmelCase__ : int = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Tuple = basic(target_str, genes_list)
print(
F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 98 | """simple docstring"""
import requests
from bsa import BeautifulSoup
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = BeautifulSoup(requests.get(lowerCamelCase , params=lowerCamelCase ).content , 'html.parser' )
UpperCAmelCase__ = soup.find('div' , attrs={'class': 'gs_ri'} )
UpperCAmelCase__ = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
return anchors[2].get_text()
if __name__ == "__main__":
lowerCAmelCase__ : Optional[int] = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 98 | 1 |
"""simple docstring"""
from __future__ import annotations
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(lowerCamelCase )
return len(lowerCamelCase ) == 9 and set(lowerCamelCase ) == set('123456789' )
def a_ ( ):
for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
UpperCAmelCase__ = 1_0_0_0_0_2 * base_num
if is_9_pandigital(lowerCamelCase ):
return candidate
for base_num in range(3_3_3 , 9_9 , -1 ):
UpperCAmelCase__ = 1_0_0_2_0_0_3 * base_num
if is_9_pandigital(lowerCamelCase ):
return candidate
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 98 | """simple docstring"""
def a_ ( lowerCamelCase ):
return str(lowerCamelCase ) == str(lowerCamelCase )[::-1]
def a_ ( lowerCamelCase ):
return int(lowerCamelCase ) + int(str(lowerCamelCase )[::-1] )
def a_ ( lowerCamelCase = 1_0_0_0_0 ):
UpperCAmelCase__ = []
for num in range(1 , lowerCamelCase ):
UpperCAmelCase__ = 0
UpperCAmelCase__ = num
while iterations < 5_0:
UpperCAmelCase__ = sum_reverse(lowerCamelCase )
iterations += 1
if is_palindrome(lowerCamelCase ):
break
else:
lychrel_nums.append(lowerCamelCase )
return len(lowerCamelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 98 | 1 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
lowerCAmelCase__ : str = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
lowerCAmelCase__ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
def a_ ( ):
UpperCAmelCase__ = 'https://pypi.org/pypi/diffusers/json'
UpperCAmelCase__ = json.loads(request.urlopen(lowerCamelCase ).read() )['releases'].keys()
return sorted(lowerCamelCase , key=lambda lowerCamelCase : version.Version(lowerCamelCase ) )
def a_ ( ):
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCamelCase )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
UpperCAmelCase__ = Path(lowerCamelCase ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def a_ ( lowerCamelCase ):
init_hf_modules()
UpperCAmelCase__ = Path(lowerCamelCase ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
UpperCAmelCase__ = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def a_ ( lowerCamelCase ):
with open(lowerCamelCase , 'r' , encoding='utf-8' ) as f:
UpperCAmelCase__ = f.read()
# Imports of the form `import .xxx`
UpperCAmelCase__ = re.findall('^\s*import\s+\.(\S+)\s*$' , lowerCamelCase , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall('^\s*from\s+\.(\S+)\s+import' , lowerCamelCase , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase ) )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = False
UpperCAmelCase__ = [module_file]
UpperCAmelCase__ = []
# Let's recurse through all relative imports
while not no_change:
UpperCAmelCase__ = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCamelCase ) )
UpperCAmelCase__ = Path(lowerCamelCase ).parent
UpperCAmelCase__ = [str(module_path / m ) for m in new_imports]
UpperCAmelCase__ = [f for f in new_import_files if f not in all_relative_imports]
UpperCAmelCase__ = [f'''{f}.py''' for f in new_import_files]
UpperCAmelCase__ = len(lowerCamelCase ) == 0
all_relative_imports.extend(lowerCamelCase )
return all_relative_imports
def a_ ( lowerCamelCase ):
with open(lowerCamelCase , 'r' , encoding='utf-8' ) as f:
UpperCAmelCase__ = f.read()
# Imports of the form `import xxx`
UpperCAmelCase__ = re.findall('^\s*import\s+(\S+)\s*$' , lowerCamelCase , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall('^\s*from\s+(\S+)\s+import' , lowerCamelCase , flags=re.MULTILINE )
# Only keep the top-level module
UpperCAmelCase__ = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
UpperCAmelCase__ = list(set(lowerCamelCase ) )
UpperCAmelCase__ = []
for imp in imports:
try:
importlib.import_module(lowerCamelCase )
except ImportError:
missing_packages.append(lowerCamelCase )
if len(lowerCamelCase ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
f'''{", ".join(lowerCamelCase )}. Run `pip install {" ".join(lowerCamelCase )}`''' )
return get_relative_imports(lowerCamelCase )
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = module_path.replace(os.path.sep , '.' )
UpperCAmelCase__ = importlib.import_module(lowerCamelCase )
if class_name is None:
return find_pipeline_class(lowerCamelCase )
return getattr(lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase ):
from ..pipelines import DiffusionPipeline
UpperCAmelCase__ = dict(inspect.getmembers(lowerCamelCase , inspect.isclass ) )
UpperCAmelCase__ = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowerCamelCase )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
f''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
f''' {loaded_module}.''' )
UpperCAmelCase__ = cls
return pipeline_class
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , ):
UpperCAmelCase__ = str(lowerCamelCase )
UpperCAmelCase__ = os.path.join(lowerCamelCase , lowerCamelCase )
if os.path.isfile(lowerCamelCase ):
UpperCAmelCase__ = module_file_or_url
UpperCAmelCase__ = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
UpperCAmelCase__ = get_diffusers_versions()
# cut ".dev0"
UpperCAmelCase__ = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
UpperCAmelCase__ = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(f'''Defaulting to latest_version: {revision}.''' )
elif revision in available_versions:
UpperCAmelCase__ = f'''v{revision}'''
elif revision == "main":
UpperCAmelCase__ = revision
else:
raise ValueError(
f'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
f''' {", ".join(available_versions + ["main"] )}.''' )
# community pipeline on GitHub
UpperCAmelCase__ = COMMUNITY_PIPELINES_URL.format(revision=lowerCamelCase , pipeline=lowerCamelCase )
try:
UpperCAmelCase__ = cached_download(
lowerCamelCase , cache_dir=lowerCamelCase , force_download=lowerCamelCase , proxies=lowerCamelCase , resume_download=lowerCamelCase , local_files_only=lowerCamelCase , use_auth_token=lowerCamelCase , )
UpperCAmelCase__ = 'git'
UpperCAmelCase__ = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(f'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
else:
try:
# Load from URL or cache if already cached
UpperCAmelCase__ = hf_hub_download(
lowerCamelCase , lowerCamelCase , cache_dir=lowerCamelCase , force_download=lowerCamelCase , proxies=lowerCamelCase , resume_download=lowerCamelCase , local_files_only=lowerCamelCase , use_auth_token=lowerCamelCase , )
UpperCAmelCase__ = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(f'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
# Check we have all the requirements in our environment
UpperCAmelCase__ = check_imports(lowerCamelCase )
# Now we move the module inside our cached dynamic modules.
UpperCAmelCase__ = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCamelCase )
UpperCAmelCase__ = Path(lowerCamelCase ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCamelCase , submodule_path / module_file )
for module_needed in modules_needed:
UpperCAmelCase__ = f'''{module_needed}.py'''
shutil.copy(os.path.join(lowerCamelCase , lowerCamelCase ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = use_auth_token
elif use_auth_token is True:
UpperCAmelCase__ = HfFolder.get_token()
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = model_info(lowerCamelCase , revision=lowerCamelCase , token=lowerCamelCase ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
UpperCAmelCase__ = submodule_path / commit_hash
UpperCAmelCase__ = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCamelCase )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCamelCase , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCamelCase , f'''{module_needed}.py''' , cache_dir=lowerCamelCase , force_download=lowerCamelCase , resume_download=lowerCamelCase , proxies=lowerCamelCase , use_auth_token=lowerCamelCase , revision=lowerCamelCase , local_files_only=lowerCamelCase , )
return os.path.join(lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , **lowerCamelCase , ):
UpperCAmelCase__ = get_cached_module_file(
lowerCamelCase , lowerCamelCase , cache_dir=lowerCamelCase , force_download=lowerCamelCase , resume_download=lowerCamelCase , proxies=lowerCamelCase , use_auth_token=lowerCamelCase , revision=lowerCamelCase , local_files_only=lowerCamelCase , )
return get_class_in_module(lowerCamelCase , final_module.replace('.py' , '' ) )
| 98 | """simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ : str = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Any = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCAmelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "ClapFeatureExtractor"
snake_case__ = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self : List[str] ,lowerCamelCase__ : str ,lowerCamelCase__ : Tuple ):
super().__init__(lowerCamelCase__ ,lowerCamelCase__ )
def __call__( self : List[Any] ,lowerCamelCase__ : List[str]=None ,lowerCamelCase__ : Dict=None ,lowerCamelCase__ : Any=None ,**lowerCamelCase__ : str ):
UpperCAmelCase__ = kwargs.pop('sampling_rate' ,lowerCamelCase__ )
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.' )
if text is not None:
UpperCAmelCase__ = self.tokenizer(lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ )
if audios is not None:
UpperCAmelCase__ = self.feature_extractor(
lowerCamelCase__ ,sampling_rate=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ )
if text is not None and audios is not None:
UpperCAmelCase__ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase__ ) ,tensor_type=lowerCamelCase__ )
def __lowerCAmelCase ( self : Tuple ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Optional[Any] ):
return self.tokenizer.batch_decode(*lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : List[str] ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : Optional[int] ):
return self.tokenizer.decode(*lowerCamelCase__ ,**lowerCamelCase__ )
@property
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = self.tokenizer.model_input_names
UpperCAmelCase__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 98 | """simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class snake_case :
"""simple docstring"""
@staticmethod
def __lowerCAmelCase ( *lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Union[str, Any] ):
pass
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = hashlib.mda(image.tobytes() )
return m.hexdigest()[:1_0]
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = np.array(lowerCamelCase )
UpperCAmelCase__ = npimg.shape
return {"hash": hashimage(lowerCamelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
"""simple docstring"""
snake_case__ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
snake_case__ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : str ):
UpperCAmelCase__ = MaskGenerationPipeline(model=lowerCamelCase__ ,image_processor=lowerCamelCase__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : str ):
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def __lowerCAmelCase ( self : Optional[Any] ):
pass
@slow
@require_torch
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = pipeline('mask-generation' ,model='facebook/sam-vit-huge' )
UpperCAmelCase__ = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' ,points_per_batch=256 )
# Shortening by hashing
UpperCAmelCase__ = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(lowerCamelCase__ ,decimals=4 ) ,[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] ,)
# fmt: on
@require_torch
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = 'facebook/sam-vit-huge'
UpperCAmelCase__ = pipeline('mask-generation' ,model=lowerCamelCase__ )
UpperCAmelCase__ = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' ,pred_iou_thresh=1 ,points_per_batch=256 )
# Shortening by hashing
UpperCAmelCase__ = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(lowerCamelCase__ ,decimals=4 ) ,[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] ,)
| 98 | 1 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase__ : Tuple = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ : Optional[int] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ : List[Any] = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ : List[Any] = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ : Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
lowerCAmelCase__ : Any = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
lowerCAmelCase__ : Union[str, Any] = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
lowerCAmelCase__ : Optional[Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
lowerCAmelCase__ : List[str] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
lowerCAmelCase__ : str = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
snake_case__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
snake_case__ = DPRContextEncoderTokenizer
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
snake_case__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
snake_case__ = DPRQuestionEncoderTokenizer
lowerCAmelCase__ : Optional[int] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
lowerCAmelCase__ : Tuple = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
lowerCAmelCase__ : Tuple = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(__UpperCAmelCase )
class snake_case :
"""simple docstring"""
def __call__( self : Optional[int] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Optional[str] = None ,lowerCamelCase__ : Optional[str] = None ,lowerCamelCase__ : Union[bool, str] = False ,lowerCamelCase__ : Union[bool, str] = False ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,lowerCamelCase__ : Optional[bool] = None ,**lowerCamelCase__ : List[Any] ,):
if titles is None and texts is None:
return super().__call__(
lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,**lowerCamelCase__ ,)
elif titles is None or texts is None:
UpperCAmelCase__ = titles if texts is None else texts
return super().__call__(
lowerCamelCase__ ,lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,**lowerCamelCase__ ,)
UpperCAmelCase__ = titles if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else [titles]
UpperCAmelCase__ = texts if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else [texts]
UpperCAmelCase__ = len(lowerCamelCase__ )
UpperCAmelCase__ = questions if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else [questions] * n_passages
assert len(lowerCamelCase__ ) == len(
lowerCamelCase__ ), f'''There should be as many titles than texts but got {len(lowerCamelCase__ )} titles and {len(lowerCamelCase__ )} texts.'''
UpperCAmelCase__ = super().__call__(lowerCamelCase__ ,lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ )['input_ids']
UpperCAmelCase__ = super().__call__(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ )['input_ids']
UpperCAmelCase__ = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCamelCase__ ,lowerCamelCase__ )
]
}
if return_attention_mask is not False:
UpperCAmelCase__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
UpperCAmelCase__ = attention_mask
return self.pad(lowerCamelCase__ ,padding=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : BatchEncoding ,lowerCamelCase__ : DPRReaderOutput ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : int = 64 ,lowerCamelCase__ : int = 4 ,):
UpperCAmelCase__ = reader_input['input_ids']
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = reader_output[:3]
UpperCAmelCase__ = len(lowerCamelCase__ )
UpperCAmelCase__ = sorted(range(lowerCamelCase__ ) ,reverse=lowerCamelCase__ ,key=relevance_logits.__getitem__ )
UpperCAmelCase__ = []
for doc_id in sorted_docs:
UpperCAmelCase__ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
UpperCAmelCase__ = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
UpperCAmelCase__ = sequence_ids.index(self.pad_token_id )
else:
UpperCAmelCase__ = len(lowerCamelCase__ )
UpperCAmelCase__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=lowerCamelCase__ ,top_spans=lowerCamelCase__ ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=lowerCamelCase__ ,start_index=lowerCamelCase__ ,end_index=lowerCamelCase__ ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(lowerCamelCase__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,):
UpperCAmelCase__ = []
for start_index, start_score in enumerate(lowerCamelCase__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
UpperCAmelCase__ = sorted(lowerCamelCase__ ,key=lambda lowerCamelCase__ : x[1] ,reverse=lowerCamelCase__ )
UpperCAmelCase__ = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
UpperCAmelCase__ = end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCamelCase__ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__UpperCAmelCase )
class snake_case ( __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = READER_PRETRAINED_VOCAB_FILES_MAP
snake_case__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = READER_PRETRAINED_INIT_CONFIGURATION
snake_case__ = ["input_ids", "attention_mask"]
snake_case__ = DPRReaderTokenizer
| 98 | """simple docstring"""
import functools
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = len(lowerCamelCase )
UpperCAmelCase__ = len(lowerCamelCase )
@functools.cache
def min_distance(lowerCamelCase , lowerCamelCase ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
UpperCAmelCase__ = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , lowerCamelCase ) , 1 + min_distance(lowerCamelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : List[str] = logging.get_logger(__name__)
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
UpperCAmelCase__ = 1_0_2_4
UpperCAmelCase__ = 4_0_9_6
UpperCAmelCase__ = 2_4
UpperCAmelCase__ = 1_6
UpperCAmelCase__ = [5, 1_1, 1_7, 2_3]
UpperCAmelCase__ = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4]
UpperCAmelCase__ = (1, 3_8_4, 3_8_4)
if "nyu" or "midas" in checkpoint_url:
UpperCAmelCase__ = 7_6_8
UpperCAmelCase__ = [1, 1, 1, 0.5]
UpperCAmelCase__ = [2_5_6, 5_1_2, 7_6_8, 7_6_8]
UpperCAmelCase__ = 1_5_0
UpperCAmelCase__ = 1_6
UpperCAmelCase__ = (1, 3_8_4, 3_8_4)
UpperCAmelCase__ = False
UpperCAmelCase__ = 'project'
if "ade" in checkpoint_url:
UpperCAmelCase__ = True
UpperCAmelCase__ = 7_6_8
UpperCAmelCase__ = [1, 1, 1, 0.5]
UpperCAmelCase__ = 1_5_0
UpperCAmelCase__ = 1_6
UpperCAmelCase__ = 'huggingface/label-files'
UpperCAmelCase__ = 'ade20k-id2label.json'
UpperCAmelCase__ = json.load(open(cached_download(hf_hub_url(lowerCamelCase , lowerCamelCase , repo_type='dataset' ) ) , 'r' ) )
UpperCAmelCase__ = {int(lowerCamelCase ): v for k, v in idalabel.items()}
UpperCAmelCase__ = idalabel
UpperCAmelCase__ = {v: k for k, v in idalabel.items()}
UpperCAmelCase__ = [1, 1_5_0, 4_8_0, 4_8_0]
return config, expected_shape
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCAmelCase__ = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
UpperCAmelCase__ = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
UpperCAmelCase__ = name.replace('patch_embed' , '' )
if "pos_embed" in name:
UpperCAmelCase__ = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
UpperCAmelCase__ = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
UpperCAmelCase__ = name.replace('proj' , 'projection' )
if "blocks" in name:
UpperCAmelCase__ = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
UpperCAmelCase__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCAmelCase__ = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
UpperCAmelCase__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
UpperCAmelCase__ = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
UpperCAmelCase__ = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
UpperCAmelCase__ = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
UpperCAmelCase__ = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
UpperCAmelCase__ = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
UpperCAmelCase__ = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
UpperCAmelCase__ = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
UpperCAmelCase__ = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCAmelCase__ = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
UpperCAmelCase__ = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
UpperCAmelCase__ = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
UpperCAmelCase__ = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
UpperCAmelCase__ = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
UpperCAmelCase__ = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCAmelCase__ = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCAmelCase__ = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCAmelCase__ = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCAmelCase__ = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCAmelCase__ = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
UpperCAmelCase__ = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
UpperCAmelCase__ = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
UpperCAmelCase__ = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
UpperCAmelCase__ = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
UpperCAmelCase__ = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
UpperCAmelCase__ = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
UpperCAmelCase__ = name.replace('pretrained' , 'dpt' )
if "bn" in name:
UpperCAmelCase__ = name.replace('bn' , 'batch_norm' )
if "head" in name:
UpperCAmelCase__ = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
UpperCAmelCase__ = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
UpperCAmelCase__ = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
UpperCAmelCase__ = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
UpperCAmelCase__ = name.replace('..' , '.' )
if "stem.conv" in name:
UpperCAmelCase__ = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
UpperCAmelCase__ = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
UpperCAmelCase__ = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
UpperCAmelCase__ = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
UpperCAmelCase__ = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
UpperCAmelCase__ = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
UpperCAmelCase__ = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def a_ ( lowerCamelCase , lowerCamelCase ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase__ = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
UpperCAmelCase__ = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase__ = in_proj_bias[: config.hidden_size]
UpperCAmelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase__ = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase__ = in_proj_bias[-config.hidden_size :]
def a_ ( ):
UpperCAmelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCAmelCase__ = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ , UpperCAmelCase__ = get_dpt_config(lowerCamelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
UpperCAmelCase__ = torch.load(lowerCamelCase , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(lowerCamelCase )
# rename keys
for key in state_dict.copy().keys():
UpperCAmelCase__ = state_dict.pop(lowerCamelCase )
UpperCAmelCase__ = val
# read in qkv matrices
read_in_q_k_v(lowerCamelCase , lowerCamelCase )
# load HuggingFace model
UpperCAmelCase__ = DPTForSemanticSegmentation(lowerCamelCase ) if 'ade' in checkpoint_url else DPTForDepthEstimation(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# Check outputs on an image
UpperCAmelCase__ = 4_8_0 if 'ade' in checkpoint_url else 3_8_4
UpperCAmelCase__ = DPTImageProcessor(size=lowerCamelCase )
UpperCAmelCase__ = prepare_img()
UpperCAmelCase__ = image_processor(lowerCamelCase , return_tensors='pt' )
# forward pass
UpperCAmelCase__ = model(**lowerCamelCase ).logits if 'ade' in checkpoint_url else model(**lowerCamelCase ).predicted_depth
if show_prediction:
UpperCAmelCase__ = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=lowerCamelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 2_5_5 ).show()
if pytorch_dump_folder_path is not None:
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
lowerCAmelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
lowerCAmelCase__ : List[str] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 98 | """simple docstring"""
from PIL import Image
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))
def contrast(lowerCamelCase ) -> int:
return int(1_2_8 + factor * (c - 1_2_8) )
return img.point(lowerCamelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
lowerCAmelCase__ : Any = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 98 | 1 |
"""simple docstring"""
from __future__ import annotations
import queue
class snake_case :
"""simple docstring"""
def __init__( self : int ,lowerCamelCase__ : List[str] ):
UpperCAmelCase__ = data
UpperCAmelCase__ = None
UpperCAmelCase__ = None
def a_ ( ):
print('\n********Press N to stop entering at any point of time********\n' )
UpperCAmelCase__ = input('Enter the value of the root node: ' ).strip().lower()
UpperCAmelCase__ = queue.Queue()
UpperCAmelCase__ = TreeNode(int(lowerCamelCase ) )
q.put(lowerCamelCase )
while not q.empty():
UpperCAmelCase__ = q.get()
UpperCAmelCase__ = f'''Enter the left node of {node_found.data}: '''
UpperCAmelCase__ = input(lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
UpperCAmelCase__ = TreeNode(int(lowerCamelCase ) )
UpperCAmelCase__ = left_node
q.put(lowerCamelCase )
UpperCAmelCase__ = f'''Enter the right node of {node_found.data}: '''
UpperCAmelCase__ = input(lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
UpperCAmelCase__ = TreeNode(int(lowerCamelCase ) )
UpperCAmelCase__ = right_node
q.put(lowerCamelCase )
raise
def a_ ( lowerCamelCase ):
if not isinstance(lowerCamelCase , lowerCamelCase ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def a_ ( lowerCamelCase ):
if not isinstance(lowerCamelCase , lowerCamelCase ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def a_ ( lowerCamelCase ):
if not isinstance(lowerCamelCase , lowerCamelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def a_ ( lowerCamelCase ):
if not isinstance(lowerCamelCase , lowerCamelCase ) or not node:
return
UpperCAmelCase__ = queue.Queue()
q.put(lowerCamelCase )
while not q.empty():
UpperCAmelCase__ = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def a_ ( lowerCamelCase ):
if not isinstance(lowerCamelCase , lowerCamelCase ) or not node:
return
UpperCAmelCase__ = queue.Queue()
q.put(lowerCamelCase )
while not q.empty():
UpperCAmelCase__ = []
while not q.empty():
UpperCAmelCase__ = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(lowerCamelCase )
def a_ ( lowerCamelCase ):
if not isinstance(lowerCamelCase , lowerCamelCase ) or not node:
return
UpperCAmelCase__ = []
UpperCAmelCase__ = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(lowerCamelCase )
UpperCAmelCase__ = n.left
# end of while means current node doesn't have left child
UpperCAmelCase__ = stack.pop()
# start to traverse its right child
UpperCAmelCase__ = n.right
def a_ ( lowerCamelCase ):
if not isinstance(lowerCamelCase , lowerCamelCase ) or not node:
return
UpperCAmelCase__ = []
UpperCAmelCase__ = node
while n or stack:
while n:
stack.append(lowerCamelCase )
UpperCAmelCase__ = n.left
UpperCAmelCase__ = stack.pop()
print(n.data , end=',' )
UpperCAmelCase__ = n.right
def a_ ( lowerCamelCase ):
if not isinstance(lowerCamelCase , lowerCamelCase ) or not node:
return
UpperCAmelCase__ , UpperCAmelCase__ = [], []
UpperCAmelCase__ = node
stacka.append(lowerCamelCase )
while stacka: # to find the reversed order of post order, store it in stack2
UpperCAmelCase__ = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(lowerCamelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def a_ ( lowerCamelCase = "" , lowerCamelCase=5_0 , lowerCamelCase="*" ):
if not s:
return "\n" + width * char
UpperCAmelCase__ , UpperCAmelCase__ = divmod(width - len(lowerCamelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
lowerCAmelCase__ : TreeNode = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
| 98 | """simple docstring"""
import os
import sys
import unittest
lowerCAmelCase__ : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCAmelCase__ : Tuple = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
lowerCAmelCase__ : Optional[Any] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = get_test_to_tester_mapping(lowerCamelCase__ )
UpperCAmelCase__ = get_test_to_tester_mapping(lowerCamelCase__ )
UpperCAmelCase__ = {'BertModelTest': 'BertModelTester'}
UpperCAmelCase__ = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = get_model_to_test_mapping(lowerCamelCase__ )
UpperCAmelCase__ = get_model_to_test_mapping(lowerCamelCase__ )
UpperCAmelCase__ = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
UpperCAmelCase__ = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = get_model_to_tester_mapping(lowerCamelCase__ )
UpperCAmelCase__ = get_model_to_tester_mapping(lowerCamelCase__ )
UpperCAmelCase__ = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
UpperCAmelCase__ = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(get_test_info.to_json(lowerCamelCase__ ) ,lowerCamelCase__ )
| 98 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ["image_processor", "tokenizer"]
snake_case__ = "Pix2StructImageProcessor"
snake_case__ = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self : int ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Union[str, Any] ):
UpperCAmelCase__ = False
super().__init__(lowerCamelCase__ ,lowerCamelCase__ )
def __call__( self : Optional[int] ,lowerCamelCase__ : Optional[int]=None ,lowerCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False ,lowerCamelCase__ : Union[bool, str, TruncationStrategy] = None ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[int] = 2_048 ,lowerCamelCase__ : int = 0 ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,**lowerCamelCase__ : Union[str, Any] ,):
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None and not self.image_processor.is_vqa:
UpperCAmelCase__ = self.tokenizer
UpperCAmelCase__ = self.tokenizer(
text=lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,stride=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,return_overflowing_tokens=lowerCamelCase__ ,return_special_tokens_mask=lowerCamelCase__ ,return_offsets_mapping=lowerCamelCase__ ,return_token_type_ids=lowerCamelCase__ ,return_length=lowerCamelCase__ ,verbose=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ ,)
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
UpperCAmelCase__ = self.image_processor(
lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,max_patches=lowerCamelCase__ ,**lowerCamelCase__ )
else:
# add pixel_values and bbox
UpperCAmelCase__ = self.image_processor(
lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,max_patches=lowerCamelCase__ ,header_text=lowerCamelCase__ ,**lowerCamelCase__ )
if text is not None and not self.image_processor.is_vqa:
UpperCAmelCase__ = self.tokenizer(
text=lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,stride=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,return_overflowing_tokens=lowerCamelCase__ ,return_special_tokens_mask=lowerCamelCase__ ,return_offsets_mapping=lowerCamelCase__ ,return_token_type_ids=lowerCamelCase__ ,return_length=lowerCamelCase__ ,verbose=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ ,)
if "attention_mask" in text_encoding:
UpperCAmelCase__ = text_encoding.pop('attention_mask' )
if "input_ids" in text_encoding:
UpperCAmelCase__ = text_encoding.pop('input_ids' )
else:
UpperCAmelCase__ = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase__ )
return encoding_image_processor
def __lowerCAmelCase ( self : Optional[int] ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : Union[str, Any] ):
return self.tokenizer.batch_decode(*lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : str ,*lowerCamelCase__ : str ,**lowerCamelCase__ : Tuple ):
return self.tokenizer.decode(*lowerCamelCase__ ,**lowerCamelCase__ )
@property
def __lowerCAmelCase ( self : Dict ):
UpperCAmelCase__ = self.tokenizer.model_input_names
UpperCAmelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 98 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ : str = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[str] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowerCAmelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
lowerCAmelCase__ : Optional[int] = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
lowerCAmelCase__ : str = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
lowerCAmelCase__ : int = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
lowerCAmelCase__ : Dict = F"""down_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : Union[str, Any] = F"""input_blocks.{3*i + j + 1}.0."""
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
lowerCAmelCase__ : Tuple = F"""down_blocks.{i}.attentions.{j}."""
lowerCAmelCase__ : int = F"""input_blocks.{3*i + j + 1}.1."""
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
lowerCAmelCase__ : Tuple = F"""up_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : int = F"""output_blocks.{3*i + j}.0."""
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
lowerCAmelCase__ : Any = F"""up_blocks.{i}.attentions.{j}."""
lowerCAmelCase__ : List[str] = F"""output_blocks.{3*i + j}.1."""
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
lowerCAmelCase__ : List[str] = F"""down_blocks.{i}.downsamplers.0.conv."""
lowerCAmelCase__ : Union[str, Any] = F"""input_blocks.{3*(i+1)}.0.op."""
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
lowerCAmelCase__ : Dict = F"""up_blocks.{i}.upsamplers.0."""
lowerCAmelCase__ : List[Any] = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
lowerCAmelCase__ : str = 'mid_block.attentions.0.'
lowerCAmelCase__ : Union[str, Any] = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
lowerCAmelCase__ : int = F"""mid_block.resnets.{j}."""
lowerCAmelCase__ : List[str] = F"""middle_block.{2*j}."""
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def a_ ( lowerCamelCase ):
# buyer beware: this is a *brittle* function,
# and correct output requires that all of these pieces interact in
# the exact order in which I have arranged them.
UpperCAmelCase__ = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
UpperCAmelCase__ = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
UpperCAmelCase__ = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
lowerCAmelCase__ : str = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
lowerCAmelCase__ : List[str] = F"""encoder.down_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : List[Any] = F"""encoder.down.{i}.block.{j}."""
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
lowerCAmelCase__ : Dict = F"""down_blocks.{i}.downsamplers.0."""
lowerCAmelCase__ : str = F"""down.{i}.downsample."""
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
lowerCAmelCase__ : int = F"""up_blocks.{i}.upsamplers.0."""
lowerCAmelCase__ : str = F"""up.{3-i}.upsample."""
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
lowerCAmelCase__ : Dict = F"""decoder.up_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : Optional[int] = F"""decoder.up.{3-i}.block.{j}."""
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
lowerCAmelCase__ : Any = F"""mid_block.resnets.{i}."""
lowerCAmelCase__ : Any = F"""mid.block_{i+1}."""
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
lowerCAmelCase__ : str = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def a_ ( lowerCamelCase ):
# convert HF linear weights to SD conv2d weights
return w.reshape(*w.shape , 1 , 1 )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
UpperCAmelCase__ = {v: vae_state_dict[k] for k, v in mapping.items()}
UpperCAmelCase__ = ['q', 'k', 'v', 'proj_out']
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f'''mid.attn_1.{weight_name}.weight''' in k:
print(f'''Reshaping {k} for SD format''' )
UpperCAmelCase__ = reshape_weight_for_sd(lowerCamelCase )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
lowerCAmelCase__ : Optional[int] = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
lowerCAmelCase__ : List[Any] = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCAmelCase__ : int = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCAmelCase__ : Optional[int] = {'q': 0, 'k': 1, 'v': 2}
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
for k, v in text_enc_dict.items():
if (
k.endswith('.self_attn.q_proj.weight' )
or k.endswith('.self_attn.k_proj.weight' )
or k.endswith('.self_attn.v_proj.weight' )
):
UpperCAmelCase__ = k[: -len('.q_proj.weight' )]
UpperCAmelCase__ = k[-len('q_proj.weight' )]
if k_pre not in capture_qkv_weight:
UpperCAmelCase__ = [None, None, None]
UpperCAmelCase__ = v
continue
if (
k.endswith('.self_attn.q_proj.bias' )
or k.endswith('.self_attn.k_proj.bias' )
or k.endswith('.self_attn.v_proj.bias' )
):
UpperCAmelCase__ = k[: -len('.q_proj.bias' )]
UpperCAmelCase__ = k[-len('q_proj.bias' )]
if k_pre not in capture_qkv_bias:
UpperCAmelCase__ = [None, None, None]
UpperCAmelCase__ = v
continue
UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
UpperCAmelCase__ = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
UpperCAmelCase__ = torch.cat(lowerCamelCase )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
UpperCAmelCase__ = torch.cat(lowerCamelCase )
return new_state_dict
def a_ ( lowerCamelCase ):
return text_enc_dict
if __name__ == "__main__":
lowerCAmelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
lowerCAmelCase__ : Optional[int] = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCAmelCase__ : Tuple = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
lowerCAmelCase__ : List[str] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
lowerCAmelCase__ : int = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
lowerCAmelCase__ : Union[str, Any] = load_file(unet_path, device='cpu')
else:
lowerCAmelCase__ : str = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
lowerCAmelCase__ : Dict = torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
lowerCAmelCase__ : Optional[Any] = load_file(vae_path, device='cpu')
else:
lowerCAmelCase__ : Optional[int] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
lowerCAmelCase__ : List[str] = torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
lowerCAmelCase__ : Tuple = load_file(text_enc_path, device='cpu')
else:
lowerCAmelCase__ : Any = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
lowerCAmelCase__ : Any = torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
lowerCAmelCase__ : Any = convert_unet_state_dict(unet_state_dict)
lowerCAmelCase__ : Dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCAmelCase__ : List[Any] = convert_vae_state_dict(vae_state_dict)
lowerCAmelCase__ : str = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCAmelCase__ : List[Any] = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCAmelCase__ : Tuple = {'transformer.' + k: v for k, v in text_enc_dict.items()}
lowerCAmelCase__ : List[str] = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCAmelCase__ : str = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
lowerCAmelCase__ : Optional[Any] = convert_text_enc_state_dict(text_enc_dict)
lowerCAmelCase__ : Optional[Any] = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCAmelCase__ : List[Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCAmelCase__ : int = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCAmelCase__ : List[str] = {'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 98 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ : Tuple = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[Any] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : int = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCAmelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = KandinskyVaaPipeline
snake_case__ = [
"image_embeds",
"negative_image_embeds",
]
snake_case__ = ["image_embeds", "negative_image_embeds"]
snake_case__ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
snake_case__ = False
@property
def __lowerCAmelCase ( self : int ):
return 32
@property
def __lowerCAmelCase ( self : Optional[int] ):
return 32
@property
def __lowerCAmelCase ( self : Tuple ):
return self.time_input_dim
@property
def __lowerCAmelCase ( self : Dict ):
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
return 100
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCAmelCase__ = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
UpperCAmelCase__ = UNetaDConditionModel(**lowerCamelCase__ )
return model
@property
def __lowerCAmelCase ( self : Any ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self : Dict ):
torch.manual_seed(0 )
UpperCAmelCase__ = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase__ = self.dummy_unet
UpperCAmelCase__ = self.dummy_movq
UpperCAmelCase__ = DDIMScheduler(
num_train_timesteps=1_000 ,beta_schedule='linear' ,beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=lowerCamelCase__ ,)
UpperCAmelCase__ = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Dict=0 ):
UpperCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
UpperCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith('mps' ):
UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ )
else:
UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
UpperCAmelCase__ = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = 'cpu'
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
UpperCAmelCase__ = output.images
UpperCAmelCase__ = pipe(
**self.get_dummy_inputs(lowerCamelCase__ ) ,return_dict=lowerCamelCase__ ,)[0]
UpperCAmelCase__ = image[0, -3:, -3:, -1]
UpperCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ = np.array(
[0.6_2_3_7_9_7_6, 1.0, 0.3_6_4_4_1_3_3_2, 1.0, 0.7_0_6_3_9_6_3_4, 0.2_9_8_7_7_1_8_6, 0.8_5_6_5_2_1_2_5, 0.5_2_1_6_8_4_3, 0.5_4_4_5_4_0_4_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy' )
UpperCAmelCase__ = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase__ )
UpperCAmelCase__ = KandinskyVaaPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' ,torch_dtype=torch.floataa )
UpperCAmelCase__ = pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = 'red cat, 4k photo'
UpperCAmelCase__ = torch.Generator(device='cuda' ).manual_seed(0 )
UpperCAmelCase__ , UpperCAmelCase__ = pipe_prior(
lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple()
UpperCAmelCase__ = torch.Generator(device='cuda' ).manual_seed(0 )
UpperCAmelCase__ = pipeline(
image_embeds=lowerCamelCase__ ,negative_image_embeds=lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=100 ,output_type='np' ,)
UpperCAmelCase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowerCamelCase__ ,lowerCamelCase__ )
| 98 | """simple docstring"""
import os
import numpy
import onnx
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = a.name
UpperCAmelCase__ = b.name
UpperCAmelCase__ = ''
UpperCAmelCase__ = ''
UpperCAmelCase__ = a == b
UpperCAmelCase__ = name_a
UpperCAmelCase__ = name_b
return res
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(lowerCamelCase , lowerCamelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase , lowerCamelCase )
_graph_replace_input_with(node_proto.attribute[1].g , lowerCamelCase , lowerCamelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for n in graph_proto.node:
_node_replace_input_with(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = list(model.graph.initializer )
UpperCAmelCase__ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
UpperCAmelCase__ = inits[i].name
UpperCAmelCase__ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = os.path.dirname(lowerCamelCase )
UpperCAmelCase__ = os.path.basename(lowerCamelCase )
UpperCAmelCase__ = onnx.load(os.path.join(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = list(model.graph.initializer )
UpperCAmelCase__ = set()
UpperCAmelCase__ = {}
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
for i in range(len(lowerCamelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(lowerCamelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(lowerCamelCase )
dup_set.add(lowerCamelCase )
UpperCAmelCase__ = inits[j].data_type
UpperCAmelCase__ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 1_1:
mem_size *= 8
else:
print('unexpected data type: ' , lowerCamelCase )
total_reduced_size += mem_size
UpperCAmelCase__ = inits[i].name
UpperCAmelCase__ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowerCamelCase )
else:
UpperCAmelCase__ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' )
UpperCAmelCase__ = sorted(lowerCamelCase )
_remove_dup_initializers_from_model(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = 'optimized_' + model_file_name
UpperCAmelCase__ = os.path.join(lowerCamelCase , lowerCamelCase )
onnx.save(lowerCamelCase , lowerCamelCase )
return new_model
| 98 | 1 |
"""simple docstring"""
import math
lowerCAmelCase__ : Any = 10
lowerCAmelCase__ : int = 7
lowerCAmelCase__ : int = BALLS_PER_COLOUR * NUM_COLOURS
def a_ ( lowerCamelCase = 2_0 ):
UpperCAmelCase__ = math.comb(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowerCamelCase )
UpperCAmelCase__ = NUM_COLOURS * (1 - missing_colour / total)
return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
| 98 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = ShapEImgaImgPipeline
snake_case__ = ["image"]
snake_case__ = ["image"]
snake_case__ = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
snake_case__ = False
@property
def __lowerCAmelCase ( self : List[str] ):
return 32
@property
def __lowerCAmelCase ( self : str ):
return 32
@property
def __lowerCAmelCase ( self : int ):
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self : List[Any] ):
return 8
@property
def __lowerCAmelCase ( self : Optional[int] ):
torch.manual_seed(0 )
UpperCAmelCase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,)
UpperCAmelCase__ = CLIPVisionModel(lowerCamelCase__ )
return model
@property
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = CLIPImageProcessor(
crop_size=224 ,do_center_crop=lowerCamelCase__ ,do_normalize=lowerCamelCase__ ,do_resize=lowerCamelCase__ ,image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] ,image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] ,resample=3 ,size=224 ,)
return image_processor
@property
def __lowerCAmelCase ( self : str ):
torch.manual_seed(0 )
UpperCAmelCase__ = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
UpperCAmelCase__ = PriorTransformer(**lowerCamelCase__ )
return model
@property
def __lowerCAmelCase ( self : Tuple ):
torch.manual_seed(0 )
UpperCAmelCase__ = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
UpperCAmelCase__ = ShapERenderer(**lowerCamelCase__ )
return model
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = self.dummy_prior
UpperCAmelCase__ = self.dummy_image_encoder
UpperCAmelCase__ = self.dummy_image_processor
UpperCAmelCase__ = self.dummy_renderer
UpperCAmelCase__ = HeunDiscreteScheduler(
beta_schedule='exp' ,num_train_timesteps=1_024 ,prediction_type='sample' ,use_karras_sigmas=lowerCamelCase__ ,clip_sample=lowerCamelCase__ ,clip_sample_range=1.0 ,)
UpperCAmelCase__ = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : Any ,lowerCamelCase__ : str=0 ):
UpperCAmelCase__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith('mps' ):
UpperCAmelCase__ = torch.manual_seed(lowerCamelCase__ )
else:
UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
UpperCAmelCase__ = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = 'cpu'
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
UpperCAmelCase__ = output.images[0]
UpperCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCAmelCase__ = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Tuple ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = torch_device == 'cpu'
UpperCAmelCase__ = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=lowerCamelCase__ ,relax_max_difference=lowerCamelCase__ ,)
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**lowerCamelCase__ )
UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = self.get_dummy_inputs(lowerCamelCase__ )
for key in inputs.keys():
if key in self.batch_params:
UpperCAmelCase__ = batch_size * [inputs[key]]
UpperCAmelCase__ = pipe(**lowerCamelCase__ ,num_images_per_prompt=lowerCamelCase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
UpperCAmelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
UpperCAmelCase__ = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
UpperCAmelCase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
UpperCAmelCase__ = pipe(
lowerCamelCase__ ,generator=lowerCamelCase__ ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type='np' ,).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCamelCase__ ,lowerCamelCase__ )
| 98 | 1 |
"""simple docstring"""
def a_ ( lowerCamelCase ):
if not isinstance(lowerCamelCase , lowerCamelCase ) or number < 0:
raise ValueError('Input must be a non-negative integer' )
UpperCAmelCase__ = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | """simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] ,lowerCamelCase__ : int="</s>" ,lowerCamelCase__ : str="<unk>" ,lowerCamelCase__ : Union[str, Any]="<pad>" ,lowerCamelCase__ : int=125 ,lowerCamelCase__ : str=None ,**lowerCamelCase__ : Union[str, Any] ,):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase__ = [f'''<extra_id_{i}>''' for i in range(lowerCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCAmelCase__ = len(set(filter(lambda lowerCamelCase__ : bool('extra_id' in str(lowerCamelCase__ ) ) ,lowerCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
' extra_ids tokens' )
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else pad_token
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else eos_token
UpperCAmelCase__ = AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) else unk_token
super().__init__(
eos_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,extra_ids=lowerCamelCase__ ,additional_special_tokens=lowerCamelCase__ ,**lowerCamelCase__ ,)
UpperCAmelCase__ = extra_ids
UpperCAmelCase__ = 2**8 # utf is 8 bits
# define special tokens dict
UpperCAmelCase__ = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
UpperCAmelCase__ = len(self.special_tokens_encoder )
UpperCAmelCase__ = len(lowerCamelCase__ )
for i, token in enumerate(lowerCamelCase__ ):
UpperCAmelCase__ = self.vocab_size + i - n
UpperCAmelCase__ = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ,lowerCamelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ ,token_ids_a=lowerCamelCase__ ,already_has_special_tokens=lowerCamelCase__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowerCamelCase__ )) + [1]
return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[int] ):
if len(lowerCamelCase__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
UpperCAmelCase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
UpperCAmelCase__ = self._add_eos_if_not_present(lowerCamelCase__ )
if token_ids_a is None:
return token_ids_a
else:
UpperCAmelCase__ = self._add_eos_if_not_present(lowerCamelCase__ )
return token_ids_a + token_ids_a
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : str ):
UpperCAmelCase__ = [chr(lowerCamelCase__ ) for i in text.encode('utf-8' )]
return tokens
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : str ):
if token in self.special_tokens_encoder:
UpperCAmelCase__ = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
UpperCAmelCase__ = self.added_tokens_encoder[token]
elif len(lowerCamelCase__ ) != 1:
UpperCAmelCase__ = self.unk_token_id
else:
UpperCAmelCase__ = ord(lowerCamelCase__ ) + self._num_special_tokens
return token_id
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Any ):
if index in self.special_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[index]
else:
UpperCAmelCase__ = chr(index - self._num_special_tokens )
return token
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
UpperCAmelCase__ = b''
for token in tokens:
if token in self.special_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.added_tokens_decoder:
UpperCAmelCase__ = self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.special_tokens_encoder:
UpperCAmelCase__ = token.encode('utf-8' )
elif token in self.added_tokens_encoder:
UpperCAmelCase__ = token.encode('utf-8' )
else:
UpperCAmelCase__ = bytes([ord(lowerCamelCase__ )] )
bstring += tok_string
UpperCAmelCase__ = bstring.decode('utf-8' ,errors='ignore' )
return string
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ):
return ()
| 98 | 1 |
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=1_0_2_4 ):
UpperCAmelCase__ , UpperCAmelCase__ = [], []
UpperCAmelCase__ = list(zip(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ , UpperCAmelCase__ = sorted_examples[0]
def is_too_big(lowerCamelCase ):
return tok(lowerCamelCase , return_tensors='pt' ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
UpperCAmelCase__ = new_src + ' ' + src
UpperCAmelCase__ = new_tgt + ' ' + tgt
if is_too_big(lowerCamelCase ) or is_too_big(lowerCamelCase ): # cant fit, finalize example
finished_src.append(lowerCamelCase )
finished_tgt.append(lowerCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ = src, tgt
else: # can fit, keep adding
UpperCAmelCase__ , UpperCAmelCase__ = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(lowerCamelCase )
finished_tgt.append(lowerCamelCase )
return finished_src, finished_tgt
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = Path(lowerCamelCase )
save_path.mkdir(exist_ok=lowerCamelCase )
for split in ["train"]:
UpperCAmelCase__ , UpperCAmelCase__ = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
UpperCAmelCase__ = [x.rstrip() for x in Path(lowerCamelCase ).open().readlines()]
UpperCAmelCase__ = [x.rstrip() for x in Path(lowerCamelCase ).open().readlines()]
UpperCAmelCase__ , UpperCAmelCase__ = pack_examples(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
print(f'''packed {split} split from {len(lowerCamelCase )} examples -> {len(lowerCamelCase )}.''' )
Path(save_path / f'''{split}.source''' ).open('w' ).write('\n'.join(lowerCamelCase ) )
Path(save_path / f'''{split}.target''' ).open('w' ).write('\n'.join(lowerCamelCase ) )
for split in ["val", "test"]:
UpperCAmelCase__ , UpperCAmelCase__ = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
shutil.copyfile(lowerCamelCase , save_path / f'''{split}.source''' )
shutil.copyfile(lowerCamelCase , save_path / f'''{split}.target''' )
def a_ ( ):
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--tok_name' , type=lowerCamelCase , help='like facebook/bart-large-cnn,t5-base, etc.' )
parser.add_argument('--max_seq_len' , type=lowerCamelCase , default=1_2_8 )
parser.add_argument('--data_dir' , type=lowerCamelCase )
parser.add_argument('--save_path' , type=lowerCamelCase )
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(lowerCamelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 98 | """simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class snake_case :
"""simple docstring"""
snake_case__ = 42
snake_case__ = None
snake_case__ = None
lowerCAmelCase__ : Union[str, Any] = namedtuple('CoinsDistribResult', 'moves excess')
def a_ ( lowerCamelCase ):
if root is None:
return 0
# Validation
def count_nodes(lowerCamelCase ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(lowerCamelCase ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(lowerCamelCase ) != count_coins(lowerCamelCase ):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
def get_distrib(lowerCamelCase ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
UpperCAmelCase__ , UpperCAmelCase__ = get_distrib(node.left )
UpperCAmelCase__ , UpperCAmelCase__ = get_distrib(node.right )
UpperCAmelCase__ = 1 - left_distrib_excess
UpperCAmelCase__ = 1 - right_distrib_excess
UpperCAmelCase__ = (
left_distrib_moves
+ right_distrib_moves
+ abs(lowerCamelCase )
+ abs(lowerCamelCase )
)
UpperCAmelCase__ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(lowerCamelCase , lowerCamelCase )
return get_distrib(lowerCamelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def a_ ( ):
UpperCAmelCase__ = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
UpperCAmelCase__ = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(lowerCamelCase )
# Let's go
UpperCAmelCase__ = parser.parse_args()
if not hasattr(lowerCamelCase , 'func' ):
parser.print_help()
exit(1 )
# Run
UpperCAmelCase__ = args.func(lowerCamelCase )
service.run()
if __name__ == "__main__":
main()
| 98 | """simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = (PNDMScheduler,)
snake_case__ = (("num_inference_steps", 50),)
def __lowerCAmelCase ( self : List[str] ,**lowerCamelCase__ : str ):
UpperCAmelCase__ = {
'num_train_timesteps': 1_000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
}
config.update(**lowerCamelCase__ )
return config
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Optional[Any]=0 ,**lowerCamelCase__ : List[str] ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : Tuple ):
pass
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str]=0 ,**lowerCamelCase__ : Tuple ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : List[Any] ,**lowerCamelCase__ : int ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = 10
UpperCAmelCase__ = self.dummy_model()
UpperCAmelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
return sample
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase__ ,'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase__ ,'set_timesteps' ):
UpperCAmelCase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCAmelCase ( self : List[Any] ):
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[int] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCamelCase__ )
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps ,torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,)
def __lowerCAmelCase ( self : Dict ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowerCamelCase__ ,beta_end=lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase__ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
def __lowerCAmelCase ( self : int ):
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.full_loop()
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.full_loop(prediction_type='v_prediction' )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def __lowerCAmelCase ( self : Union[str, Any] ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
| 98 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = (PNDMScheduler,)
snake_case__ = (("num_inference_steps", 50),)
def __lowerCAmelCase ( self : List[str] ,**lowerCamelCase__ : str ):
UpperCAmelCase__ = {
'num_train_timesteps': 1_000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
}
config.update(**lowerCamelCase__ )
return config
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Optional[Any]=0 ,**lowerCamelCase__ : List[str] ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : Tuple ):
pass
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : List[str]=0 ,**lowerCamelCase__ : Tuple ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = new_scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowerCAmelCase ( self : List[Any] ,**lowerCamelCase__ : int ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(**lowerCamelCase__ )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = 10
UpperCAmelCase__ = self.dummy_model()
UpperCAmelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase__ = model(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
return sample
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = dict(self.forward_default_kwargs )
UpperCAmelCase__ = kwargs.pop('num_inference_steps' ,lowerCamelCase__ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase__ ,'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase__ ,'set_timesteps' ):
UpperCAmelCase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
UpperCAmelCase__ = dummy_past_residuals[:]
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,0 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = scheduler.step_plms(lowerCamelCase__ ,1 ,lowerCamelCase__ ,**lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCAmelCase ( self : List[Any] ):
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[int] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCamelCase__ )
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps ,torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) ,)
def __lowerCAmelCase ( self : Dict ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowerCamelCase__ ,beta_end=lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowerCamelCase__ )
def __lowerCAmelCase ( self : int ):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase__ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase__ = self.dummy_sample
UpperCAmelCase__ = 0.1 * sample
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase__ = scheduler.step_prk(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
def __lowerCAmelCase ( self : int ):
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase__ )
scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.full_loop()
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.full_loop(prediction_type='v_prediction' )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def __lowerCAmelCase ( self : Union[str, Any] ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def __lowerCAmelCase ( self : Tuple ):
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase__ = self.full_loop(set_alpha_to_one=lowerCamelCase__ ,beta_start=0.0_1 )
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
| 98 | """simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase__ : Optional[Any] = logging.getLogger()
def a_ ( ):
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('-f' )
UpperCAmelCase__ = parser.parse_args()
return args.f
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
UpperCAmelCase__ = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 ,'run_glue_deebert.py' )
with patch.object(lowerCamelCase__ ,'argv' ,lowerCamelCase__ ):
UpperCAmelCase__ = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCamelCase__ ,0.6_6_6 )
@slow
@require_torch_non_multi_gpu
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
self.run_and_check(lowerCamelCase__ )
UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCamelCase__ )
UpperCAmelCase__ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCamelCase__ )
| 98 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase__ : Optional[int] = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "mobilenet_v1"
def __init__( self : Optional[Any] ,lowerCamelCase__ : List[Any]=3 ,lowerCamelCase__ : Optional[int]=224 ,lowerCamelCase__ : int=1.0 ,lowerCamelCase__ : Union[str, Any]=8 ,lowerCamelCase__ : str="relu6" ,lowerCamelCase__ : Tuple=True ,lowerCamelCase__ : Optional[Any]=0.9_9_9 ,lowerCamelCase__ : Optional[int]=0.0_2 ,lowerCamelCase__ : str=0.0_0_1 ,**lowerCamelCase__ : List[str] ,):
super().__init__(**lowerCamelCase__ )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = image_size
UpperCAmelCase__ = depth_multiplier
UpperCAmelCase__ = min_depth
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = tf_padding
UpperCAmelCase__ = classifier_dropout_prob
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = version.parse("1.11" )
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def __lowerCAmelCase ( self : Optional[Any] ):
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def __lowerCAmelCase ( self : str ):
return 1e-4
| 98 | """simple docstring"""
import argparse
lowerCAmelCase__ : List[str] = 'docs/source/_static/js/custom.js'
def a_ ( lowerCamelCase ):
with open(lowerCamelCase , encoding='utf-8' , newline='\n' ) as f:
UpperCAmelCase__ = f.readlines()
UpperCAmelCase__ = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
UpperCAmelCase__ = f'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += f''' "v{version}": "v{version}",\n'''
with open(lowerCamelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--version', help='Release version.')
lowerCAmelCase__ : Optional[int] = parser.parse_args()
update_custom_js(args.version)
| 98 | 1 |
"""simple docstring"""
def a_ ( lowerCamelCase = 1_0_0_0 ):
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 98 | """simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def a_ ( lowerCamelCase ):
return np.dot(lowerCamelCase , lowerCamelCase )
class snake_case :
"""simple docstring"""
def __init__( self : int ,*,
lowerCamelCase__ : float = np.inf ,lowerCamelCase__ : str = "linear" ,lowerCamelCase__ : float = 0.0 ,):
UpperCAmelCase__ = regularization
UpperCAmelCase__ = gamma
if kernel == "linear":
UpperCAmelCase__ = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma' )
if not isinstance(self.gamma ,(float, int) ):
raise ValueError('gamma must be float or int' )
if not self.gamma > 0:
raise ValueError('gamma must be > 0' )
UpperCAmelCase__ = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
UpperCAmelCase__ = f'''Unknown kernel: {kernel}'''
raise ValueError(lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : ndarray ,lowerCamelCase__ : ndarray ):
return np.dot(lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : ndarray ,lowerCamelCase__ : ndarray ):
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : list[ndarray] ,lowerCamelCase__ : ndarray ):
UpperCAmelCase__ = observations
UpperCAmelCase__ = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((UpperCAmelCase__) , ) = np.shape(lowerCamelCase__ )
def to_minimize(lowerCamelCase__ : ndarray ) -> float:
UpperCAmelCase__ = 0
((UpperCAmelCase__) , ) = np.shape(lowerCamelCase__ )
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] ,observations[j] )
)
return 1 / 2 * s - sum(lowerCamelCase__ )
UpperCAmelCase__ = LinearConstraint(lowerCamelCase__ ,0 ,0 )
UpperCAmelCase__ = Bounds(0 ,self.regularization )
UpperCAmelCase__ = minimize(
lowerCamelCase__ ,np.ones(lowerCamelCase__ ) ,bounds=lowerCamelCase__ ,constraints=[ly_contraint] ).x
UpperCAmelCase__ = l_star
# calculating mean offset of separation plane to points
UpperCAmelCase__ = 0
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] ,observations[j] )
UpperCAmelCase__ = s / n
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : ndarray ):
UpperCAmelCase__ = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] ,lowerCamelCase__ )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def a_ ( lowerCamelCase ):
return np.dot(lowerCamelCase , lowerCamelCase )
class snake_case :
"""simple docstring"""
def __init__( self : int ,*,
lowerCamelCase__ : float = np.inf ,lowerCamelCase__ : str = "linear" ,lowerCamelCase__ : float = 0.0 ,):
UpperCAmelCase__ = regularization
UpperCAmelCase__ = gamma
if kernel == "linear":
UpperCAmelCase__ = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma' )
if not isinstance(self.gamma ,(float, int) ):
raise ValueError('gamma must be float or int' )
if not self.gamma > 0:
raise ValueError('gamma must be > 0' )
UpperCAmelCase__ = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
UpperCAmelCase__ = f'''Unknown kernel: {kernel}'''
raise ValueError(lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : ndarray ,lowerCamelCase__ : ndarray ):
return np.dot(lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : ndarray ,lowerCamelCase__ : ndarray ):
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : list[ndarray] ,lowerCamelCase__ : ndarray ):
UpperCAmelCase__ = observations
UpperCAmelCase__ = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((UpperCAmelCase__) , ) = np.shape(lowerCamelCase__ )
def to_minimize(lowerCamelCase__ : ndarray ) -> float:
UpperCAmelCase__ = 0
((UpperCAmelCase__) , ) = np.shape(lowerCamelCase__ )
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] ,observations[j] )
)
return 1 / 2 * s - sum(lowerCamelCase__ )
UpperCAmelCase__ = LinearConstraint(lowerCamelCase__ ,0 ,0 )
UpperCAmelCase__ = Bounds(0 ,self.regularization )
UpperCAmelCase__ = minimize(
lowerCamelCase__ ,np.ones(lowerCamelCase__ ) ,bounds=lowerCamelCase__ ,constraints=[ly_contraint] ).x
UpperCAmelCase__ = l_star
# calculating mean offset of separation plane to points
UpperCAmelCase__ = 0
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] ,observations[j] )
UpperCAmelCase__ = s / n
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : ndarray ):
UpperCAmelCase__ = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] ,lowerCamelCase__ )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | """simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCAmelCase__ : List[Any] = '\\n\n'
lowerCAmelCase__ : Tuple = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
lowerCAmelCase__ : str = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
"""simple docstring"""
def __lowerCAmelCase ( self : str ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) ,reference_urls=['https://huggingface.co/docs/transformers/perplexity'] ,)
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : List[str]=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
UpperCAmelCase__ = 'cuda'
else:
UpperCAmelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
UpperCAmelCase__ = model.to(lowerCamelCase__ )
UpperCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase__ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
UpperCAmelCase__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(lowerCamelCase__ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
UpperCAmelCase__ = model.config.max_length - 1
else:
UpperCAmelCase__ = model.config.max_length
UpperCAmelCase__ = tokenizer(
lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors='pt' ,return_attention_mask=lowerCamelCase__ ,).to(lowerCamelCase__ )
UpperCAmelCase__ = encodings['input_ids']
UpperCAmelCase__ = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) ,1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) ,2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
UpperCAmelCase__ = []
UpperCAmelCase__ = CrossEntropyLoss(reduction='none' )
for start_index in logging.tqdm(range(0 ,len(lowerCamelCase__ ) ,lowerCamelCase__ ) ):
UpperCAmelCase__ = min(start_index + batch_size ,len(lowerCamelCase__ ) )
UpperCAmelCase__ = encoded_texts[start_index:end_index]
UpperCAmelCase__ = attn_masks[start_index:end_index]
if add_start_token:
UpperCAmelCase__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowerCamelCase__ )
UpperCAmelCase__ = torch.cat([bos_tokens_tensor, encoded_batch] ,dim=1 )
UpperCAmelCase__ = torch.cat(
[torch.ones(bos_tokens_tensor.size() ,dtype=torch.intaa ).to(lowerCamelCase__ ), attn_mask] ,dim=1 )
UpperCAmelCase__ = encoded_batch
with torch.no_grad():
UpperCAmelCase__ = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ).logits
UpperCAmelCase__ = out_logits[..., :-1, :].contiguous()
UpperCAmelCase__ = labels[..., 1:].contiguous()
UpperCAmelCase__ = attn_mask[..., 1:].contiguous()
UpperCAmelCase__ = torch.expa(
(loss_fct(shift_logits.transpose(1 ,2 ) ,lowerCamelCase__ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(lowerCamelCase__ )}
| 98 | 1 |
"""simple docstring"""
from statistics import mean
import numpy as np
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = 0
# Number of processes finished
UpperCAmelCase__ = 0
# Displays the finished process.
# If it is 0, the performance is completed if it is 1, before the performance.
UpperCAmelCase__ = [0] * no_of_process
# List to include calculation results
UpperCAmelCase__ = [0] * no_of_process
# Sort by arrival time.
UpperCAmelCase__ = [burst_time[i] for i in np.argsort(lowerCamelCase )]
UpperCAmelCase__ = [process_name[i] for i in np.argsort(lowerCamelCase )]
arrival_time.sort()
while no_of_process > finished_process_count:
UpperCAmelCase__ = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
UpperCAmelCase__ = arrival_time[i]
UpperCAmelCase__ = 0
# Index showing the location of the process being performed
UpperCAmelCase__ = 0
# Saves the current response ratio.
UpperCAmelCase__ = 0
for i in range(0 , lowerCamelCase ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
UpperCAmelCase__ = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
UpperCAmelCase__ = temp
UpperCAmelCase__ = i
# Calculate the turn around time
UpperCAmelCase__ = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
UpperCAmelCase__ = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = [0] * no_of_process
for i in range(0 , lowerCamelCase ):
UpperCAmelCase__ = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
lowerCAmelCase__ : Dict = 5
lowerCAmelCase__ : Optional[Any] = ['A', 'B', 'C', 'D', 'E']
lowerCAmelCase__ : Optional[int] = [1, 2, 3, 4, 5]
lowerCAmelCase__ : List[str] = [1, 2, 3, 4, 5]
lowerCAmelCase__ : List[str] = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
lowerCAmelCase__ : Union[str, Any] = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time')
for i in range(0, no_of_process):
print(
F"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
F"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(F"""average waiting time : {mean(waiting_time):.5f}""")
print(F"""average turn around time : {mean(turn_around_time):.5f}""")
| 98 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : int = logging.get_logger(__name__)
lowerCAmelCase__ : str = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "xglm"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : Any ,lowerCamelCase__ : Any=256_008 ,lowerCamelCase__ : Optional[Any]=2_048 ,lowerCamelCase__ : List[str]=1_024 ,lowerCamelCase__ : List[str]=4_096 ,lowerCamelCase__ : Tuple=24 ,lowerCamelCase__ : Optional[int]=16 ,lowerCamelCase__ : int="gelu" ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : List[Any]=0.0 ,lowerCamelCase__ : List[str]=0.0 ,lowerCamelCase__ : Optional[Any]=0.0_2 ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=2 ,lowerCamelCase__ : Dict=1 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Tuple=2 ,**lowerCamelCase__ : List[Any] ,):
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = d_model
UpperCAmelCase__ = ffn_dim
UpperCAmelCase__ = num_layers
UpperCAmelCase__ = attention_heads
UpperCAmelCase__ = activation_function
UpperCAmelCase__ = dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = layerdrop
UpperCAmelCase__ = init_std
UpperCAmelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase__ = use_cache
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,decoder_start_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
| 98 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowerCAmelCase__ : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = state_dict.pop(lowerCamelCase )
UpperCAmelCase__ = val
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCAmelCase__ = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
UpperCAmelCase__ = value
else:
UpperCAmelCase__ = value
return new_state_dict
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = ''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCAmelCase__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCAmelCase__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ = in_proj_weight[:2_5_6, :]
UpperCAmelCase__ = in_proj_bias[:2_5_6]
UpperCAmelCase__ = in_proj_weight[2_5_6:5_1_2, :]
UpperCAmelCase__ = in_proj_bias[2_5_6:5_1_2]
UpperCAmelCase__ = in_proj_weight[-2_5_6:, :]
UpperCAmelCase__ = in_proj_bias[-2_5_6:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCAmelCase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCAmelCase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ = in_proj_weight[:2_5_6, :]
UpperCAmelCase__ = in_proj_bias[:2_5_6]
UpperCAmelCase__ = in_proj_weight[2_5_6:5_1_2, :]
UpperCAmelCase__ = in_proj_bias[2_5_6:5_1_2]
UpperCAmelCase__ = in_proj_weight[-2_5_6:, :]
UpperCAmelCase__ = in_proj_bias[-2_5_6:]
# read in weights + bias of input projection layer of cross-attention
UpperCAmelCase__ = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
UpperCAmelCase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCAmelCase__ = in_proj_weight_cross_attn[:2_5_6, :]
UpperCAmelCase__ = in_proj_bias_cross_attn[:2_5_6]
UpperCAmelCase__ = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
UpperCAmelCase__ = in_proj_bias_cross_attn[2_5_6:5_1_2]
UpperCAmelCase__ = in_proj_weight_cross_attn[-2_5_6:, :]
UpperCAmelCase__ = in_proj_bias_cross_attn[-2_5_6:]
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ , UpperCAmelCase__ = image.size
UpperCAmelCase__ = max(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = 8_0_0 if 'detection' in checkpoint_url else 1_0_0_0
UpperCAmelCase__ = target_max_size / current_max_size
UpperCAmelCase__ = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = F.to_tensor(lowerCamelCase )
UpperCAmelCase__ = F.normalize(lowerCamelCase , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
logger.info('Converting model...' )
# load original state dict
UpperCAmelCase__ = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location='cpu' )
# rename keys
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = rename_backbone_keys(lowerCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase__ = 'model.'
for key in state_dict.copy().keys():
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
UpperCAmelCase__ = state_dict.pop(lowerCamelCase )
UpperCAmelCase__ = val
# create HuggingFace model and load state dict
UpperCAmelCase__ = TableTransformerConfig(
backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
UpperCAmelCase__ = 1_5
UpperCAmelCase__ = 2
UpperCAmelCase__ = {0: 'table', 1: 'table rotated'}
UpperCAmelCase__ = idalabel
UpperCAmelCase__ = {v: k for k, v in idalabel.items()}
else:
UpperCAmelCase__ = 1_2_5
UpperCAmelCase__ = 6
UpperCAmelCase__ = {
0: 'table',
1: 'table column',
2: 'table row',
3: 'table column header',
4: 'table projected row header',
5: 'table spanning cell',
}
UpperCAmelCase__ = idalabel
UpperCAmelCase__ = {v: k for k, v in idalabel.items()}
UpperCAmelCase__ = DetrImageProcessor(
format='coco_detection' , max_size=8_0_0 if 'detection' in checkpoint_url else 1_0_0_0 )
UpperCAmelCase__ = TableTransformerForObjectDetection(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# verify our conversion
UpperCAmelCase__ = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
UpperCAmelCase__ = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=lowerCamelCase )
UpperCAmelCase__ = Image.open(lowerCamelCase ).convert('RGB' )
UpperCAmelCase__ = normalize(resize(lowerCamelCase , lowerCamelCase ) ).unsqueeze(0 )
UpperCAmelCase__ = model(lowerCamelCase )
if "detection" in checkpoint_url:
UpperCAmelCase__ = (1, 1_5, 3)
UpperCAmelCase__ = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
UpperCAmelCase__ = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
UpperCAmelCase__ = (1, 1_2_5, 7)
UpperCAmelCase__ = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
UpperCAmelCase__ = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
# Push model to HF hub
logger.info('Pushing model to the hub...' )
UpperCAmelCase__ = (
'microsoft/table-transformer-detection'
if 'detection' in checkpoint_url
else 'microsoft/table-transformer-structure-recognition'
)
model.push_to_hub(lowerCamelCase )
image_processor.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase__ : Dict = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCAmelCase__ : Optional[int] = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 98 | """simple docstring"""
import math
def a_ ( lowerCamelCase , lowerCamelCase ):
if (
not isinstance(lowerCamelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * power_factor
def a_ ( lowerCamelCase , lowerCamelCase ):
if (
not isinstance(lowerCamelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 1 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = RobertaPreLayerNormConfig.from_pretrained(
lowerCamelCase , architectures=['RobertaPreLayerNormForMaskedLM'] )
# convert state_dict
UpperCAmelCase__ = torch.load(hf_hub_download(repo_id=lowerCamelCase , filename='pytorch_model.bin' ) )
UpperCAmelCase__ = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith('roberta.' ):
UpperCAmelCase__ = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
continue
UpperCAmelCase__ = tensor_value
UpperCAmelCase__ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase , config=lowerCamelCase , state_dict=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
# convert tokenizer
UpperCAmelCase__ = AutoTokenizer.from_pretrained(lowerCamelCase )
tokenizer.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase__ : int = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 98 | """simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
lowerCAmelCase__ : Optional[int] = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
lowerCAmelCase__ : str = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
lowerCAmelCase__ : int = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
lowerCAmelCase__ : Dict = F"""down_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : Union[str, Any] = F"""input_blocks.{3*i + j + 1}.0."""
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
lowerCAmelCase__ : Tuple = F"""down_blocks.{i}.attentions.{j}."""
lowerCAmelCase__ : int = F"""input_blocks.{3*i + j + 1}.1."""
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
lowerCAmelCase__ : Tuple = F"""up_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : int = F"""output_blocks.{3*i + j}.0."""
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
lowerCAmelCase__ : Any = F"""up_blocks.{i}.attentions.{j}."""
lowerCAmelCase__ : List[str] = F"""output_blocks.{3*i + j}.1."""
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
lowerCAmelCase__ : List[str] = F"""down_blocks.{i}.downsamplers.0.conv."""
lowerCAmelCase__ : Union[str, Any] = F"""input_blocks.{3*(i+1)}.0.op."""
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
lowerCAmelCase__ : Dict = F"""up_blocks.{i}.upsamplers.0."""
lowerCAmelCase__ : List[Any] = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
lowerCAmelCase__ : str = 'mid_block.attentions.0.'
lowerCAmelCase__ : Union[str, Any] = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
lowerCAmelCase__ : int = F"""mid_block.resnets.{j}."""
lowerCAmelCase__ : List[str] = F"""middle_block.{2*j}."""
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def a_ ( lowerCamelCase ):
# buyer beware: this is a *brittle* function,
# and correct output requires that all of these pieces interact in
# the exact order in which I have arranged them.
UpperCAmelCase__ = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
UpperCAmelCase__ = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
UpperCAmelCase__ = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
lowerCAmelCase__ : str = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
lowerCAmelCase__ : List[str] = F"""encoder.down_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : List[Any] = F"""encoder.down.{i}.block.{j}."""
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
lowerCAmelCase__ : Dict = F"""down_blocks.{i}.downsamplers.0."""
lowerCAmelCase__ : str = F"""down.{i}.downsample."""
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
lowerCAmelCase__ : int = F"""up_blocks.{i}.upsamplers.0."""
lowerCAmelCase__ : str = F"""up.{3-i}.upsample."""
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
lowerCAmelCase__ : Dict = F"""decoder.up_blocks.{i}.resnets.{j}."""
lowerCAmelCase__ : Optional[int] = F"""decoder.up.{3-i}.block.{j}."""
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
lowerCAmelCase__ : Any = F"""mid_block.resnets.{i}."""
lowerCAmelCase__ : Any = F"""mid.block_{i+1}."""
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
lowerCAmelCase__ : str = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def a_ ( lowerCamelCase ):
# convert HF linear weights to SD conv2d weights
return w.reshape(*w.shape , 1 , 1 )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
UpperCAmelCase__ = v.replace(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = v
UpperCAmelCase__ = {v: vae_state_dict[k] for k, v in mapping.items()}
UpperCAmelCase__ = ['q', 'k', 'v', 'proj_out']
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f'''mid.attn_1.{weight_name}.weight''' in k:
print(f'''Reshaping {k} for SD format''' )
UpperCAmelCase__ = reshape_weight_for_sd(lowerCamelCase )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
lowerCAmelCase__ : Optional[int] = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
lowerCAmelCase__ : List[Any] = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCAmelCase__ : int = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCAmelCase__ : Optional[int] = {'q': 0, 'k': 1, 'v': 2}
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
UpperCAmelCase__ = {}
for k, v in text_enc_dict.items():
if (
k.endswith('.self_attn.q_proj.weight' )
or k.endswith('.self_attn.k_proj.weight' )
or k.endswith('.self_attn.v_proj.weight' )
):
UpperCAmelCase__ = k[: -len('.q_proj.weight' )]
UpperCAmelCase__ = k[-len('q_proj.weight' )]
if k_pre not in capture_qkv_weight:
UpperCAmelCase__ = [None, None, None]
UpperCAmelCase__ = v
continue
if (
k.endswith('.self_attn.q_proj.bias' )
or k.endswith('.self_attn.k_proj.bias' )
or k.endswith('.self_attn.v_proj.bias' )
):
UpperCAmelCase__ = k[: -len('.q_proj.bias' )]
UpperCAmelCase__ = k[-len('q_proj.bias' )]
if k_pre not in capture_qkv_bias:
UpperCAmelCase__ = [None, None, None]
UpperCAmelCase__ = v
continue
UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
UpperCAmelCase__ = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
UpperCAmelCase__ = torch.cat(lowerCamelCase )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
UpperCAmelCase__ = textenc_pattern.sub(lambda lowerCamelCase : protected[re.escape(m.group(0 ) )] , lowerCamelCase )
UpperCAmelCase__ = torch.cat(lowerCamelCase )
return new_state_dict
def a_ ( lowerCamelCase ):
return text_enc_dict
if __name__ == "__main__":
lowerCAmelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
lowerCAmelCase__ : Optional[int] = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCAmelCase__ : Tuple = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
lowerCAmelCase__ : List[str] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
lowerCAmelCase__ : int = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
lowerCAmelCase__ : Union[str, Any] = load_file(unet_path, device='cpu')
else:
lowerCAmelCase__ : str = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
lowerCAmelCase__ : Dict = torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
lowerCAmelCase__ : Optional[Any] = load_file(vae_path, device='cpu')
else:
lowerCAmelCase__ : Optional[int] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
lowerCAmelCase__ : List[str] = torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
lowerCAmelCase__ : Tuple = load_file(text_enc_path, device='cpu')
else:
lowerCAmelCase__ : Any = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
lowerCAmelCase__ : Any = torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
lowerCAmelCase__ : Any = convert_unet_state_dict(unet_state_dict)
lowerCAmelCase__ : Dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCAmelCase__ : List[Any] = convert_vae_state_dict(vae_state_dict)
lowerCAmelCase__ : str = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCAmelCase__ : List[Any] = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCAmelCase__ : Tuple = {'transformer.' + k: v for k, v in text_enc_dict.items()}
lowerCAmelCase__ : List[str] = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCAmelCase__ : str = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
lowerCAmelCase__ : Optional[Any] = convert_text_enc_state_dict(text_enc_dict)
lowerCAmelCase__ : Optional[Any] = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCAmelCase__ : List[Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCAmelCase__ : int = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCAmelCase__ : List[str] = {'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 98 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.